Fastforwarding datarmnet CRT:data-kernel.lnx.1.2-211122 to data-kernel.lnx.2.0
@@ -12,7 +12,6 @@
 #include "dfc.h"
 
 #define QMAP_DFC_VER 1
-#define QMAP_PS_MAX_BEARERS 32
 
 struct qmap_dfc_config {
 	struct qmap_cmd_hdr hdr;
@@ -102,7 +101,7 @@ struct qmap_dfc_powersave_req {
 	__be32 ep_type;
 	__be32 iface_id;
 	u8 num_bearers;
-	u8 bearer_id[QMAP_PS_MAX_BEARERS];
+	u8 bearer_id[PS_MAX_BEARERS];
 	u8 reserved4[3];
 } __aligned(1);
 
@@ -440,8 +439,8 @@ static int dfc_qmap_send_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
 	dfc_powersave->mode = enable ? 1 : 0;
 
 	if (enable && num_bearers) {
-		if (unlikely(num_bearers > QMAP_PS_MAX_BEARERS))
-			num_bearers = QMAP_PS_MAX_BEARERS;
+		if (unlikely(num_bearers > PS_MAX_BEARERS))
+			num_bearers = PS_MAX_BEARERS;
 		dfc_powersave->allow = 1;
 		dfc_powersave->autoshut = 1;
 		dfc_powersave->num_bearers = num_bearers;
@@ -44,6 +44,7 @@ struct dfc_ack_cmd {
 } __aligned(1);
 
 static void dfc_svc_init(struct work_struct *work);
+extern int dfc_ps_ext;
 
 /* **************************************************** */
 #define DFC_SERVICE_ID_V01 0x4E
@@ -775,8 +776,11 @@ dfc_indication_register_req(struct qmi_handle *dfc_handle,
 
 	req->report_flow_status_valid = 1;
 	req->report_flow_status = reg;
-	req->report_tx_link_status_valid = 1;
-	req->report_tx_link_status = reg;
+
+	if (!dfc_ps_ext) {
+		req->report_tx_link_status_valid = 1;
+		req->report_tx_link_status = reg;
+	}
 
 	ret = qmi_send_request(dfc_handle, ssctl, &txn,
 			       QMI_DFC_INDICATION_REGISTER_REQ_V01,
@@ -1003,7 +1007,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 	u32 adjusted_grant;
 
 	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
-	if (!itm)
+
+	/* cache the bearer assuming it is a new bearer */
+	if (unlikely(!itm && !is_query && fc_info->num_bytes))
 		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);
 
 	if (itm) {
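The condition change in dfc_update_fc_map above is easy to misread in diff form. A minimal restatement of the new predicate (the helper below is hypothetical, written only to spell it out; it is not part of the driver):

/* An unknown bearer is cached only when the indication is not a query
 * and actually grants bytes, so queries and zero-byte indications can
 * no longer create entries for bearers the driver never saw.
 */
static bool should_cache_bearer(bool found, bool is_query, u32 num_bytes)
{
	return !found && !is_query && num_bytes != 0;
}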
core/qmi_rmnet.c (159 changes)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,7 +25,6 @@
 #include <linux/moduleparam.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/alarmtimer.h>
 
 #define NLMSG_FLOW_ACTIVATE 1
 #define NLMSG_FLOW_DEACTIVATE 2
@@ -226,21 +226,6 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
 	return 0;
 }
 
-static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
-{
-	struct Qdisc *qdisc;
-
-	if (unlikely(txq >= dev->num_tx_queues))
-		return;
-
-	qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc);
-	if (qdisc) {
-		spin_lock_bh(qdisc_lock(qdisc));
-		qdisc_reset(qdisc);
-		spin_unlock_bh(qdisc_lock(qdisc));
-	}
-}
-
 /**
  * qmi_rmnet_watchdog_fn - watchdog timer func
  */
@@ -371,15 +356,13 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
 
 		mq->bearer = NULL;
 		mq->is_ll_ch = false;
-		if (reset) {
-			qmi_rmnet_reset_txq(dev, i);
-			qmi_rmnet_flow_control(dev, i, 1);
-
-			if (dfc_mode == DFC_MODE_SA) {
-				j = i + ACK_MQ_OFFSET;
-				qmi_rmnet_reset_txq(dev, j);
-				qmi_rmnet_flow_control(dev, j, 1);
-			}
+		mq->drop_on_remove = reset;
+		smp_mb();
+
+		qmi_rmnet_flow_control(dev, i, 1);
+		if (dfc_mode == DFC_MODE_SA) {
+			j = i + ACK_MQ_OFFSET;
+			qmi_rmnet_flow_control(dev, j, 1);
 		}
 
@@ -404,6 +387,8 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
 	if (!mq->bearer) {
 		mq->bearer = bearer;
 		mq->is_ll_ch = bearer->ch_switch.current_ch;
+		mq->drop_on_remove = false;
+		smp_mb();
 
 		if (dfc_mode == DFC_MODE_SA) {
 			bearer->mq_idx = itm->mq_idx;
@@ -412,12 +397,15 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
 			bearer->mq_idx = itm->mq_idx;
 		}
 
-		qmi_rmnet_flow_control(dev, itm->mq_idx,
-				       bearer->grant_size > 0 ? 1 : 0);
+		/* Always enable flow for the newly associated bearer */
+		if (!bearer->grant_size) {
+			bearer->grant_size = DEFAULT_GRANT;
+			bearer->grant_thresh =
+				qmi_rmnet_grant_per(DEFAULT_GRANT);
+		}
+		qmi_rmnet_flow_control(dev, itm->mq_idx, 1);
 		if (dfc_mode == DFC_MODE_SA)
-			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
-					       bearer->grant_size > 0 ? 1 : 0);
+			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, 1);
 	}
 }
 
@@ -634,7 +622,6 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 		if (!qmi)
 			return -ENOMEM;
 
-		qmi->ws = wakeup_source_register(NULL, "RMNET_DFC");
 		rmnet_init_qmi_pt(port, qmi);
 	}
 
@@ -684,7 +671,6 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
 
 	if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
 		rmnet_reset_qmi_pt(port);
-		wakeup_source_unregister(qmi->ws);
 		kfree(qmi);
 		return 0;
 	}
@@ -757,7 +743,6 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
 		    !qmi_rmnet_has_client(qmi) &&
 		    !qmi_rmnet_has_pending(qmi)) {
 			rmnet_reset_qmi_pt(port);
-			wakeup_source_unregister(qmi->ws);
 			kfree(qmi);
 		}
 
@@ -955,8 +940,8 @@ void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
 EXPORT_SYMBOL(qmi_rmnet_prepare_ps_bearers);
 
 #ifdef CONFIG_QTI_QMI_DFC
-bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
-				   struct sk_buff *skb)
+bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
+			      bool *drop, bool *is_low_latency)
 {
 	struct qos_info *qos = rmnet_get_qos_pt(dev);
 	int txq = skb->queue_mapping;
@@ -967,9 +952,15 @@ bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
 	if (unlikely(!qos || txq >= MAX_MQ_NUM))
 		return false;
 
-	return qos->mq[txq].is_ll_ch;
+	/* If the bearer is gone, packets may need to be dropped */
+	*drop = (txq != DEFAULT_MQ_NUM && !READ_ONCE(qos->mq[txq].bearer) &&
+		 READ_ONCE(qos->mq[txq].drop_on_remove));
+
+	*is_low_latency = READ_ONCE(qos->mq[txq].is_ll_ch);
+
+	return true;
 }
-EXPORT_SYMBOL(qmi_rmnet_flow_is_low_latency);
+EXPORT_SYMBOL(qmi_rmnet_get_flow_state);
 
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
 			      int ip_type, u32 mark, unsigned int len)
@@ -1152,7 +1143,6 @@ static u8 ps_bearer_id[32];
 
 struct rmnet_powersave_work {
 	struct delayed_work work;
-	struct alarm atimer;
 	void *port;
 	u64 old_rx_pkts;
 	u64 old_tx_pkts;
@@ -1209,7 +1199,8 @@ done:
 }
 EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister);
 
-int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
+int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable, u8 num_bearers,
+				 u8 *bearer_id)
 {
 	int rc = -EINVAL;
 	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
@@ -1217,7 +1208,8 @@ int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
 	if (!qmi || !qmi->wda_client)
 		return rc;
 
-	rc = wda_set_powersave_mode(qmi->wda_client, enable);
+	rc = wda_set_powersave_mode(qmi->wda_client, enable, num_bearers,
+				    bearer_id);
 	if (rc < 0) {
 		pr_err("%s() failed set powersave mode[%u], err=%d\n",
 		       __func__, enable, rc);
@@ -1236,32 +1228,6 @@ static void qmi_rmnet_work_restart(void *port)
 	rcu_read_unlock();
 }
 
-static enum alarmtimer_restart qmi_rmnet_work_alarm(struct alarm *atimer,
-						    ktime_t now)
-{
-	struct rmnet_powersave_work *real_work;
-
-	real_work = container_of(atimer, struct rmnet_powersave_work, atimer);
-	qmi_rmnet_work_restart(real_work->port);
-	return ALARMTIMER_NORESTART;
-}
-
-static void dfc_wakelock_acquire(struct qmi_info *qmi)
-{
-	if (qmi && !qmi->wakelock_active) {
-		__pm_stay_awake(qmi->ws);
-		qmi->wakelock_active = true;
-	}
-}
-
-static void dfc_wakelock_release(struct qmi_info *qmi)
-{
-	if (qmi && qmi->wakelock_active) {
-		__pm_relax(qmi->ws);
-		qmi->wakelock_active = false;
-	}
-}
-
 static void qmi_rmnet_check_stats(struct work_struct *work)
 {
 	struct rmnet_powersave_work *real_work;
@@ -1269,7 +1235,6 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 	u64 rxd, txd;
 	u64 rx, tx;
 	bool dl_msg_active;
-	bool use_alarm_timer = true;
 
 	real_work = container_of(to_delayed_work(work),
 				 struct rmnet_powersave_work, work);
@@ -1281,8 +1246,6 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 	if (unlikely(!qmi))
 		return;
 
-	dfc_wakelock_release(qmi);
-
 	rmnet_get_packets(real_work->port, &rx, &tx);
 	rxd = rx - real_work->old_rx_pkts;
 	txd = tx - real_work->old_tx_pkts;
@@ -1298,7 +1261,8 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 		qmi->ps_ignore_grant = false;
 
 		/* Register to get QMI DFC and DL marker */
-		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0)
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 0,
+						 0, NULL) < 0)
 			goto end;
 
 		qmi->ps_enabled = false;
@@ -1317,13 +1281,12 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 		 * (likely in RLF), no need to enter powersave
 		 */
 		if (!dl_msg_active &&
-		    !rmnet_all_flows_enabled(real_work->port)) {
-			use_alarm_timer = false;
+		    !rmnet_all_flows_enabled(real_work->port))
 			goto end;
-		}
 
 		/* Deregister to suppress QMI DFC and DL marker */
-		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0)
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 1,
+						 0, NULL) < 0)
 			goto end;
 
 		qmi->ps_enabled = true;
@@ -1344,21 +1307,9 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
 	}
 end:
 	rcu_read_lock();
-	if (!rmnet_work_quit) {
-		if (use_alarm_timer) {
-			/* Suspend will fail and get delayed for 2s if
-			 * alarmtimer expires within 2s. Hold a wakelock
-			 * for the actual timer duration to prevent suspend
-			 */
-			if (PS_INTERVAL_MS < 2000)
-				dfc_wakelock_acquire(qmi);
-			alarm_start_relative(&real_work->atimer,
-					     PS_INTERVAL_KT);
-		} else {
-			queue_delayed_work(rmnet_ps_wq, &real_work->work,
-					   PS_INTERVAL_JF);
-		}
-	}
+	if (!rmnet_work_quit)
+		queue_delayed_work(rmnet_ps_wq, &real_work->work,
+				   PS_INTERVAL_JF);
 	rcu_read_unlock();
 }
 
@@ -1369,6 +1320,7 @@ static void qmi_rmnet_check_stats_2(struct work_struct *work)
 	u64 rxd, txd;
 	u64 rx, tx;
 	u8 num_bearers;
+	int rc;
 
 	real_work = container_of(to_delayed_work(work),
 				 struct rmnet_powersave_work, work);
@@ -1380,9 +1332,6 @@ static void qmi_rmnet_check_stats_2(struct work_struct *work)
 	if (unlikely(!qmi))
 		return;
 
-	if (PS_INTERVAL_MS < 2000)
-		dfc_wakelock_acquire(qmi);
-
 	rmnet_get_packets(real_work->port, &rx, &tx);
 	rxd = rx - real_work->old_rx_pkts;
 	txd = tx - real_work->old_tx_pkts;
@@ -1395,7 +1344,12 @@ static void qmi_rmnet_check_stats_2(struct work_struct *work)
 		qmi->ps_ignore_grant = false;
 
 		/* Out of powersave */
-		if (dfc_qmap_set_powersave(0, 0, NULL))
+		if (dfc_qmap)
+			rc = dfc_qmap_set_powersave(0, 0, NULL);
+		else
+			rc = qmi_rmnet_set_powersave_mode(real_work->port, 0,
+							  0, NULL);
+		if (rc)
 			goto end;
 
 		qmi->ps_enabled = false;
@@ -1419,20 +1373,22 @@ static void qmi_rmnet_check_stats_2(struct work_struct *work)
 					      ps_bearer_id);
 
 		/* Enter powersave */
-		dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);
+		if (dfc_qmap)
+			dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);
+		else
+			qmi_rmnet_set_powersave_mode(real_work->port, 1,
+						     num_bearers, ps_bearer_id);
 
 		if (rmnet_get_powersave_notif(real_work->port))
 			qmi_rmnet_ps_on_notify(real_work->port);
 
-		dfc_wakelock_release(qmi);
 		return;
 	}
 end:
 	rcu_read_lock();
 	if (!rmnet_work_quit)
-		alarm_start_relative(&real_work->atimer, PS_INTERVAL_KT);
-	else
-		dfc_wakelock_release(qmi);
+		queue_delayed_work(rmnet_ps_wq, &real_work->work,
+				   PS_INTERVAL_JF);
 	rcu_read_unlock();
 }
 
@@ -1468,13 +1424,12 @@ void qmi_rmnet_work_init(void *port)
 		return;
 	}
 
-	if (dfc_qmap && dfc_ps_ext)
-		INIT_DEFERRABLE_WORK(&rmnet_work->work,
+	if (dfc_ps_ext)
+		INIT_DELAYED_WORK(&rmnet_work->work,
 				  qmi_rmnet_check_stats_2);
 	else
-		INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
+		INIT_DELAYED_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
 
-	alarm_init(&rmnet_work->atimer, ALARM_BOOTTIME, qmi_rmnet_work_alarm);
 	rmnet_work->port = port;
 	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
 			  &rmnet_work->old_tx_pkts);
@@ -1509,14 +1464,12 @@ void qmi_rmnet_work_exit(void *port)
 	synchronize_rcu();
 
 	rmnet_work_inited = false;
-	alarm_cancel(&rmnet_work->atimer);
 	cancel_delayed_work_sync(&rmnet_work->work);
 	destroy_workqueue(rmnet_ps_wq);
 	qmi_rmnet_work_set_active(port, 0);
 	rmnet_ps_wq = NULL;
 	kfree(rmnet_work);
 	rmnet_work = NULL;
-	dfc_wakelock_release((struct qmi_info *)rmnet_get_qmi_pt(port));
 }
 EXPORT_SYMBOL(qmi_rmnet_work_exit);
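The new drop_on_remove handshake relies on ordering rather than locking: the teardown path in __qmi_rmnet_bearer_put clears mq->bearer, records the reset flag, and issues smp_mb() before re-enabling the queue, while qmi_rmnet_get_flow_state reads both fields with READ_ONCE(). A standalone sketch of that publish/observe pattern, with hypothetical names and WRITE_ONCE added for symmetry (the patch itself uses plain stores ahead of the barrier):

struct mq_state {
	void *bearer;
	bool drop_on_remove;
};

/* Writer: retire the bearer, then publish before waking the queue */
static void mq_teardown(struct mq_state *mq, bool reset)
{
	WRITE_ONCE(mq->bearer, NULL);
	WRITE_ONCE(mq->drop_on_remove, reset);
	smp_mb();	/* order the stores against the flow-enable that follows */
}

/* Reader (xmit path): drop only when both facts are visible */
static bool mq_should_drop(struct mq_state *mq)
{
	return !READ_ONCE(mq->bearer) && READ_ONCE(mq->drop_on_remove);
}

A transmit racing ahead of the barrier simply sees the old bearer and keeps sending, which is the outcome the patch tolerates.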
@@ -72,8 +72,8 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev,
 			 struct net_device *vnd_dev, u8 mux_id);
 void qmi_rmnet_qos_exit_pre(void *qos);
 void qmi_rmnet_qos_exit_post(void);
-bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
-				   struct sk_buff *skb);
+bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
+			      bool *drop, bool *is_low_latency);
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
 			      int ip_type, u32 mark, unsigned int len);
 int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -93,8 +93,10 @@ static inline void qmi_rmnet_qos_exit_post(void)
 {
 }
 
-static inline bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
-						 struct sk_buff *skb)
+static inline bool qmi_rmnet_get_flow_state(struct net_device *dev,
+					    struct sk_buff *skb,
+					    bool *drop,
+					    bool *is_low_latency)
 {
 	return false;
 }
@@ -113,7 +115,8 @@ static inline int qmi_rmnet_get_queue(struct net_device *dev,
 #endif
 
 #ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
-int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable);
+int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable, u8 num_bearers,
+				 u8 *bearer_id);
 void qmi_rmnet_work_init(void *port);
 void qmi_rmnet_work_exit(void *port);
 void qmi_rmnet_work_maybe_restart(void *port);
@@ -128,7 +131,8 @@ void qmi_rmnet_ps_off_notify(void *port);
 void qmi_rmnet_ps_on_notify(void *port);
 
 #else
-static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
+static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable,
+					       u8 num_bearers, u8 *bearer_id)
 {
 	return 0;
 }
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +18,6 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/timer.h>
-#include <linux/pm_wakeup.h>
 #include <uapi/linux/rtnetlink.h>
 #include <linux/soc/qcom/qmi.h>
 
@@ -32,6 +32,7 @@
 #define INVALID_MQ 0xFF
 
 #define DFC_MODE_SA 4
+#define PS_MAX_BEARERS 32
 
 #define CONFIG_QTI_QMI_RMNET 1
 #define CONFIG_QTI_QMI_DFC 1
@@ -114,6 +115,7 @@ struct svc_info {
 struct mq_map {
 	struct rmnet_bearer_map *bearer;
 	bool is_ll_ch;
+	bool drop_on_remove;
 };
 
 struct qos_info {
@@ -141,8 +143,6 @@ struct qmi_info {
 	bool dl_msg_active;
 	bool ps_ignore_grant;
 	int ps_ext;
-	bool wakelock_active;
-	struct wakeup_source *ws;
 };
 
 enum data_ep_type_enum_v01 {
@@ -268,7 +268,8 @@ static int rmnet_ll_switch(struct net_device *dev,
 int
 wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
 void wda_qmi_client_exit(void *wda_data);
-int wda_set_powersave_mode(void *wda_data, u8 enable);
+int wda_set_powersave_mode(void *wda_data, u8 enable, u8 num_bearers,
+			   u8 *bearer_id);
 void qmi_rmnet_flush_ps_wq(void);
 void wda_qmi_client_release(void *wda_data);
 int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id);
@@ -283,7 +284,8 @@ static inline void wda_qmi_client_exit(void *wda_data)
 {
 }
 
-static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
+static inline int wda_set_powersave_mode(void *wda_data, u8 enable,
+					 u8 num_bearers, u8 *bearer_id)
 {
 	return -EINVAL;
 }
@@ -35,6 +35,13 @@
 			 sizeof(struct rmnet_map_header) + \
 			 sizeof(struct rmnet_map_control_command_header))
 
+#define rmnet_descriptor_for_each_frag(p, desc) \
+	list_for_each_entry(p, &desc->frags, list)
+#define rmnet_descriptor_for_each_frag_safe(p, tmp, desc) \
+	list_for_each_entry_safe(p, tmp, &desc->frags, list)
+#define rmnet_descriptor_for_each_frag_safe_reverse(p, tmp, desc) \
+	list_for_each_entry_safe_reverse(p, tmp, &desc->frags, list)
+
 typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
 				       struct rmnet_port *port);
 typedef void (*rmnet_perf_chain_hook_t)(void);
@@ -81,7 +88,7 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
 
 	list_del(&frag_desc->list);
 
-	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
+	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
 		struct page *page = skb_frag_page(&frag->frag);
 
 		if (page)
@@ -112,7 +119,7 @@ void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
 		return NULL;
 	}
 
-	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
+	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
 		u32 frag_size = skb_frag_size(&frag->frag);
 
 		if (!size)
@@ -162,7 +169,7 @@ void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
 
 	/* Compute number of bytes to remove from the end */
 	eat = frag_desc->len - size;
-	list_for_each_entry_safe_reverse(frag, tmp, &frag_desc->frags, list) {
+	rmnet_descriptor_for_each_frag_safe_reverse(frag, tmp, frag_desc) {
 		u32 frag_size = skb_frag_size(&frag->frag);
 
 		if (!eat)
@@ -206,7 +213,7 @@ static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
 		return -EINVAL;
 
 	/* Copy 'len' bytes into the bufer starting from 'off' */
-	list_for_each_entry(frag, &frag_desc->frags, list) {
+	rmnet_descriptor_for_each_frag(frag, frag_desc) {
 		if (!len)
 			break;
 
@@ -241,7 +248,7 @@ void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
 
 	/* Find the starting fragment */
 	offset = off;
-	list_for_each_entry(frag, &frag_desc->frags, list) {
+	rmnet_descriptor_for_each_frag(frag, frag_desc) {
 		frag_size = skb_frag_size(&frag->frag);
 		if (off < frag_size) {
 			start = skb_frag_address(&frag->frag) + off;
@@ -298,7 +305,7 @@ int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
 	if (off > from->len || len > from->len || off + len > from->len)
 		return -EINVAL;
 
-	list_for_each_entry(frag, &from->frags, list) {
+	rmnet_descriptor_for_each_frag(frag, from) {
 		u32 frag_size;
 
 		if (!len)
@@ -798,6 +805,7 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
 	struct sk_buff *head_skb, *current_skb, *skb;
 	struct skb_shared_info *shinfo;
 	struct rmnet_fragment *frag, *tmp;
+	struct rmnet_skb_cb *cb;
 
 	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
 	if (frag_desc->hdrs_valid) {
@@ -840,7 +848,7 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
 	current_skb = head_skb;
 
 	/* Add in the page fragments */
-	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
+	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
 		struct page *p = skb_frag_page(&frag->frag);
 		u32 frag_size = skb_frag_size(&frag->frag);
 
@@ -874,6 +882,9 @@ add_frag:
 skip_frags:
 	head_skb->dev = frag_desc->dev;
 	rmnet_set_skb_proto(head_skb);
+	cb = RMNET_SKB_CB(head_skb);
+	cb->coal_bytes = frag_desc->coal_bytes;
+	cb->coal_bufsize = frag_desc->coal_bufsize;
 
 	/* Handle any header metadata that needs to be updated after RSB/RSC
 	 * segmentation
@@ -971,7 +982,7 @@ skip_frags:
 	}
 
 	if (frag_desc->flush_shs)
-		head_skb->cb[0] = 1;
+		cb->flush_shs = 1;
 
 	/* Handle coalesced packets */
 	if (frag_desc->gso_segs > 1)
@@ -1150,6 +1161,10 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
 	coal_desc->pkt_id = pkt_id + 1;
 	coal_desc->gso_segs = 0;
 
+	/* Only relevant for the first segment to avoid overcoutning */
+	coal_desc->coal_bytes = 0;
+	coal_desc->coal_bufsize = 0;
+
 	list_add_tail(&new_desc->list, list);
 	return;
 
@@ -1191,9 +1206,7 @@ static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
 	return !csum_fold(csum);
 }
 
-/* Converts the coalesced frame into a list of descriptors.
- * NLOs containing csum erros will not be included.
- */
+/* Converts the coalesced frame into a list of descriptors */
 static void
 rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
 			     u64 nlo_err_mask, struct rmnet_port *port,
@@ -1201,6 +1214,7 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
 {
 	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
 	struct rmnet_map_v5_coal_header coal_hdr;
+	struct rmnet_fragment *frag;
 	u8 *version;
 	u16 pkt_len;
 	u8 pkt, total_pkt = 0;
@@ -1317,6 +1331,10 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
 	}
 
 	coal_desc->hdrs_valid = 1;
+	coal_desc->coal_bytes = coal_desc->len;
+	rmnet_descriptor_for_each_frag(frag, coal_desc)
+		coal_desc->coal_bufsize +=
+			page_size(skb_frag_page(&frag->frag));
 
 	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
 		/* Mark the checksum as valid if it checks out */
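The three rmnet_descriptor_for_each_frag* macros are thin wrappers over list_for_each_entry and its _safe/_safe_reverse variants, so every conversion above is mechanical. A sketch of how a caller would use the non-safe form (the helper below is illustrative, not part of the patch):

/* Sum the sizes of all page fragments attached to a descriptor;
 * equivalent to open-coding list_for_each_entry(frag, &desc->frags, list).
 */
static u32 rmnet_frag_total_size(struct rmnet_frag_descriptor *desc)
{
	struct rmnet_fragment *frag;
	u32 total = 0;

	rmnet_descriptor_for_each_frag(frag, desc)
		total += skb_frag_size(&frag->frag);

	return total;
}

The _safe variants remain necessary wherever the loop body frees or unlinks the current fragment, as in rmnet_recycle_frag_descriptor above.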
@@ -36,6 +36,8 @@ struct rmnet_frag_descriptor {
 	struct list_head list;
 	struct list_head frags;
 	struct net_device *dev;
+	u32 coal_bufsize;
+	u32 coal_bytes;
 	u32 len;
 	u32 hash;
 	u32 priority;
@@ -427,6 +427,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	struct sk_buff *skb = *pskb;
 	struct rmnet_port *port;
 	struct net_device *dev;
+	struct rmnet_skb_cb *cb;
 	int (*rmnet_core_shs_switch)(struct sk_buff *skb,
 				     struct rmnet_shs_clnt_s *cfg);
 
@@ -451,9 +452,10 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 
 		rcu_read_lock();
 		rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
-		if (rmnet_core_shs_switch && !skb->cb[1] &&
+		cb = RMNET_SKB_CB(skb);
+		if (rmnet_core_shs_switch && !cb->qmap_steer &&
 		    skb->priority != 0xda1a) {
-			skb->cb[1] = 1;
+			cb->qmap_steer = 1;
 			rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
 			rcu_read_unlock();
 			return RX_HANDLER_CONSUMED;
@@ -1,4 +1,5 @@
 /* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +24,7 @@
 
 static struct rmnet_ll_stats rmnet_ll_stats;
 /* For TX sync with DMA operations */
-static DEFINE_SPINLOCK(rmnet_ll_tx_lock);
+DEFINE_SPINLOCK(rmnet_ll_tx_lock);
 
 /* Client operations for respective underlying HW */
 extern struct rmnet_ll_client_ops rmnet_ll_client;
@@ -1,4 +1,5 @@
 /* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +30,11 @@ struct rmnet_ll_stats {
 	u64 rx_oom;
 	u64 rx_pkts;
 	u64 rx_tmp_allocs;
+	u64 tx_disabled;
+	u64 tx_enabled;
+	u64 tx_fc_queued;
+	u64 tx_fc_sent;
+	u64 tx_fc_err;
 };
 
 int rmnet_ll_send_skb(struct sk_buff *skb);
@@ -1,4 +1,5 @@
 /* Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,17 +17,69 @@
 #include <linux/skbuff.h>
 #include <linux/ipa.h>
 #include <linux/if_ether.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
 #include "rmnet_ll.h"
 #include "rmnet_ll_core.h"
 
 #define IPA_RMNET_LL_RECEIVE 1
+#define IPA_RMNET_LL_FLOW_EVT 2
+
+#define MAX_Q_LEN 1000
 
 static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;
+static struct sk_buff_head tx_pending_list;
+extern spinlock_t rmnet_ll_tx_lock;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
+static void rmnet_ll_ipa_tx_pending(unsigned long data);
+DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending, 0);
+static void rmnet_ll_ipa_tx_pending(unsigned long data)
+#else
+static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t);
+DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending);
+static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t)
+#endif
+{
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	struct sk_buff *skb;
+	int rc;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+
+	while ((skb = __skb_dequeue(&tx_pending_list))) {
+		rc = ipa_rmnet_ll_xmit(skb);
+		if (rc == -EAGAIN) {
+			stats->tx_disabled++;
+			__skb_queue_head(&tx_pending_list, skb);
+			break;
+		}
+		if (rc >= 0)
+			stats->tx_fc_sent++;
+		else
+			stats->tx_fc_err++;
+	}
+
+	spin_unlock_bh(&rmnet_ll_tx_lock);
+}
 
 static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
 {
-	struct rmnet_ll_endpoint *ll_ep = *((struct rmnet_ll_endpoint **)arg);
+	struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ipa_ep;
 	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
 	struct sk_buff *skb, *tmp;
 
+	if (arg == (void *)(uintptr_t)(IPA_RMNET_LL_FLOW_EVT)) {
+		stats->tx_enabled++;
+		tasklet_schedule(&tx_pending_task);
+		return;
+	}
+
+	if (unlikely(arg != (void *)(uintptr_t)(IPA_RMNET_LL_RECEIVE))) {
+		pr_err("%s: invalid arg %u\n", __func__, (uintptr_t)arg);
+		return;
+	}
+
 	skb = rx_data;
 	/* Odds are IPA does this, but just to be safe */
 	skb->dev = ll_ep->phys_dev;
@@ -67,10 +120,16 @@ static void rmnet_ll_ipa_probe(void *arg)
 static void rmnet_ll_ipa_remove(void *arg)
 {
 	struct rmnet_ll_endpoint **ll_ep = arg;
+	struct sk_buff *skb;
 
 	dev_put((*ll_ep)->phys_dev);
 	kfree(*ll_ep);
 	*ll_ep = NULL;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+	while ((skb = __skb_dequeue(&tx_pending_list)))
+		kfree_skb(skb);
+	spin_unlock_bh(&rmnet_ll_tx_lock);
 }
 
 static void rmnet_ll_ipa_ready(void * __unused)
@@ -90,17 +149,45 @@
 
 static int rmnet_ll_ipa_tx(struct sk_buff *skb)
 {
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	int rc;
+
 	if (!rmnet_ll_ipa_ep)
 		return -ENODEV;
 
-	/* IPA handles freeing the SKB on failure */
-	return ipa_rmnet_ll_xmit(skb);
+	if (!skb_queue_empty(&tx_pending_list))
+		goto queue_skb;
+
+	rc = ipa_rmnet_ll_xmit(skb);
+
+	/* rc >=0: success, return number of free descriptors left */
+	if (rc >= 0)
+		return 0;
+
+	if (rc != -EAGAIN)
+		return rc;
+
+	stats->tx_disabled++;
+
+queue_skb:
+	/* Flow controlled */
+	if (skb_queue_len(&tx_pending_list) >= MAX_Q_LEN) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_queue_tail(&tx_pending_list, skb);
+	stats->tx_fc_queued++;
+
+	return 0;
 }
 
 static int rmnet_ll_ipa_init(void)
 {
 	int rc;
 
+	__skb_queue_head_init(&tx_pending_list);
 	rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
 	if (rc == -EEXIST) {
 		/* IPA is already up. Call it ourselves, since they don't */
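The LINUX_VERSION_CODE guard around the tasklet exists because kernel 5.9 changed the tasklet API: DECLARE_TASKLET lost its unsigned long data argument, and callbacks now receive the tasklet pointer itself. A minimal sketch of the 5.9+ form outside this driver (hypothetical names; the patch's own tasklet keeps its state in file-scope variables, so it never needs from_tasklet):

#include <linux/interrupt.h>

struct my_ctx {
	struct tasklet_struct task;
	int value;
};

/* 5.9+ callback signature: the tasklet itself is passed in, and
 * from_tasklet() recovers the embedding structure when state is needed.
 */
static void my_task_fn(struct tasklet_struct *t)
{
	struct my_ctx *ctx = from_tasklet(ctx, t, task);

	pr_info("tasklet ran, value=%d\n", ctx->value);
}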
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, 2016-2020 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
 #ifndef _RMNET_PRIVATE_H_
 #define _RMNET_PRIVATE_H_
 
+#include <linux/types.h>
+
 #define RMNET_MAX_PACKET_SIZE 16384
 #define RMNET_DFLT_PACKET_SIZE 1500
 #define RMNET_NEEDED_HEADROOM 16
@@ -45,4 +47,17 @@ RMNET_INGRESS_FORMAT_DL_MARKER_V2)
 /* Pass the frame directly to another device with dev_queue_xmit() */
 #define RMNET_EPMODE_BRIDGE (2)
 
+/* Struct for skb control block use within rmnet driver */
+struct rmnet_skb_cb {
+	/* MUST be the first entries because of legacy reasons */
+	char flush_shs;
+	char qmap_steer;
+
+	/* coalescing stats */
+	u32 coal_bytes;
+	u32 coal_bufsize;
+};
+
+#define RMNET_SKB_CB(skb) ((struct rmnet_skb_cb *)(skb)->cb)
+
 #endif /* _RMNET_PRIVATE_H_ */
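Since RMNET_SKB_CB() casts skb->cb directly, struct rmnet_skb_cb must never outgrow the 48-byte control block, and the two legacy char fields must stay first because older consumers indexed cb[0]/cb[1] by raw offset. A compile-time guard one could place next to the macro (an illustrative assumption; the patch does not include it):

#include <linux/build_bug.h>
#include <linux/skbuff.h>

/* Fails the build if the private cb layout stops fitting in skb->cb */
static inline void rmnet_skb_cb_check(void)
{
	BUILD_BUG_ON(sizeof(struct rmnet_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
}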
@@ -1,4 +1,5 @@
 /* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -79,7 +80,8 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	u32 mark;
 	unsigned int len;
 	rmnet_perf_tether_egress_hook_t rmnet_perf_tether_egress;
-	bool low_latency;
+	bool low_latency = false;
+	bool need_to_drop = false;
 
 	priv = netdev_priv(dev);
 	if (priv->real_dev) {
@@ -92,7 +94,14 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 		if (rmnet_perf_tether_egress) {
 			rmnet_perf_tether_egress(skb);
 		}
-		low_latency = qmi_rmnet_flow_is_low_latency(dev, skb);
+
+		qmi_rmnet_get_flow_state(dev, skb, &need_to_drop, &low_latency);
+		if (unlikely(need_to_drop)) {
+			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+			kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+
 		if (low_latency && skb_is_gso(skb)) {
 			netdev_features_t features;
 			struct sk_buff *segs, *tmp;
@@ -512,6 +521,11 @@ static const char rmnet_ll_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"LL RX OOM errors",
 	"LL RX packets",
 	"LL RX temp buffer allocations",
+	"LL TX disabled",
+	"LL TX enabled",
+	"LL TX FC queued",
+	"LL TX FC sent",
+	"LL TX FC err",
 };
 
 static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
core/wda_qmi.c (111 changes)
@@ -1,4 +1,5 @@
 /* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -42,7 +43,7 @@ static void wda_svc_config(struct work_struct *work);
 
 #define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
 #define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
-#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 4
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 48
 #define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7
 
 enum wda_powersave_config_mask_enum_v01 {
@@ -73,6 +74,14 @@ struct wda_set_powersave_config_resp_msg_v01 {
 struct wda_set_powersave_mode_req_msg_v01 {
 	/* Mandatory */
 	uint8_t powersave_control_flag;
+	/* Optional */
+	uint8_t allow_dfc_notify_valid;
+	uint8_t allow_dfc_notify;
+	uint8_t allow_bearer_id_list_valid;
+	uint8_t allow_bearer_id_list_len;
+	uint8_t allow_bearer_id_list[PS_MAX_BEARERS];
+	uint8_t auto_shut_allow_bearer_valid;
+	uint8_t auto_shut_allow_bearer;
 };
 
 struct wda_set_powersave_mode_resp_msg_v01 {
@@ -176,6 +185,83 @@ static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = {
 				   powersave_control_flag),
 		.ei_array = NULL,
 	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   allow_dfc_notify_valid),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   allow_dfc_notify),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x11,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   allow_bearer_id_list_valid),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x11,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   allow_bearer_id_list_len),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = PS_MAX_BEARERS,
+		.elem_size = sizeof(u8),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x11,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   allow_bearer_id_list),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x12,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   auto_shut_allow_bearer_valid),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x12,
+		.offset = offsetof(struct
+				   wda_set_powersave_mode_req_msg_v01,
+				   auto_shut_allow_bearer),
+		.ei_array = NULL,
+	},
 	{
 		.data_type = QMI_EOTI,
 		.array_type = NO_ARRAY,
@@ -202,7 +288,8 @@ static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = {
 	},
 };
 
-static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable)
+static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable,
+				      u8 num_bearers, u8 *bearer_id)
 {
 	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
 	struct wda_set_powersave_mode_resp_msg_v01 *resp;
@@ -232,6 +319,20 @@ static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable)
 	}
 
 	req->powersave_control_flag = enable;
+
+	if (enable && num_bearers && bearer_id &&
+	    num_bearers <= PS_MAX_BEARERS) {
+		req->allow_dfc_notify_valid = 1;
+		req->allow_dfc_notify = 1;
+
+		req->allow_bearer_id_list_valid = 1;
+		req->allow_bearer_id_list_len = num_bearers;
+		memcpy(req->allow_bearer_id_list, bearer_id, num_bearers);
+
+		req->auto_shut_allow_bearer_valid = 1;
+		req->auto_shut_allow_bearer = 1;
+	}
+
 	ret = qmi_send_request(&data->handle, &data->ssctl, &txn,
 			       QMI_WDA_SET_POWERSAVE_MODE_REQ_V01,
 			       QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN,
@@ -465,10 +566,12 @@ void wda_qmi_client_exit(void *wda_data)
 	kfree(data);
 }
 
-int wda_set_powersave_mode(void *wda_data, uint8_t enable)
+int wda_set_powersave_mode(void *wda_data, uint8_t enable, u8 num_bearers,
+			   u8 *bearer_id)
 {
 	trace_wda_set_powersave_mode(enable);
-	return wda_set_powersave_mode_req(wda_data, enable);
+	return wda_set_powersave_mode_req(wda_data, enable, num_bearers,
+					  bearer_id);
 }
 
 void wda_qmi_client_release(void *wda_data)
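The bump of QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN from 4 to 48 checks out against the new optional TLVs, assuming the standard QMI framing of one type byte plus a two-byte length per element (the framing is an assumption here, not something the patch states). The old value of 4 was exactly the single mandatory TLV:

/* Worst-case encoded size of wda_set_powersave_mode_req_msg_v01:
 *
 *   mandatory powersave_control_flag:        3 + 1      =  4
 *   optional  allow_dfc_notify       (0x10): 3 + 1      =  4
 *   optional  allow_bearer_id_list   (0x11): 3 + 1 + 32 = 36
 *   optional  auto_shut_allow_bearer (0x12): 3 + 1      =  4
 *                                                  total = 48
 */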