dfc: fix spinlock leak

In the DFC powersave work, the separate spin lock and unlock
of multiple qos structures could get out of sync during SSR,
resulting in a spinlock leak after exiting the work.

This change consolidates the spinlock operations to avoid
multiple locking and unlocking, and fixes the following issue:

BUG: workqueue leaked lock or atomic: kworker/0:9/0x00000201/1361
     last function: qmi_rmnet_check_stats_2.cfi_jt [rmnet_core]
1 lock held by kworker/0:9/1361:
(&qos->qos_lock){....}-{2:2}, at: rmnet_lock_unlock_all_flows+0xa4/0xdc

Change-Id: I10c1687a4f9993363dc631dee0b347faaa1067ab
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
这个提交包含在:
Subash Abhinov Kasiviswanathan
2021-08-17 20:21:55 -07:00
父节点 6b6ee8b645
当前提交 ef13a42ae2
修改 4 个文件,包含 33 行新增,114 行删除

查看文件

@@ -907,28 +907,10 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
/**
* qmi_rmnet_lock_unlock_all_flows - lock or unlock all bearers
* rmnet_prepare_ps_bearers - get disabled bearers and
* reset enabled bearers
*/
void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock)
{
struct qos_info *qos;
qos = (struct qos_info *)rmnet_get_qos_pt(dev);
if (!qos)
return;
if (lock)
spin_lock_bh(&qos->qos_lock);
else
spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_lock_unlock_all_flows);
/**
* qmi_rmnet_get_disabled_flows - get disabled bearers
* Needs to be called with qos_lock
*/
void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
u8 *bearer_id)
{
struct qos_info *qos;
@@ -940,35 +922,10 @@ void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
if (!qos || !num_bearers)
return;
spin_lock_bh(&qos->qos_lock);
num_bearers_left = *num_bearers;
list_for_each_entry(bearer, &qos->bearer_head, list) {
if (!bearer->grant_size && num_bearers_left) {
if (bearer_id)
bearer_id[current_num_bearers] =
bearer->bearer_id;
current_num_bearers++;
num_bearers_left--;
}
}
*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(qmi_rmnet_get_disabled_flows);
/**
* qmi_rmnet_reset_enabled_flows - reset enabled bearers for powersave
* Needs to be called with qos_lock
*/
void qmi_rmnet_reset_enabled_flows(struct net_device *dev)
{
struct qos_info *qos;
struct rmnet_bearer_map *bearer;
qos = (struct qos_info *)rmnet_get_qos_pt(dev);
if (!qos)
return;
list_for_each_entry(bearer, &qos->bearer_head, list) {
if (bearer->grant_size) {
bearer->seq = 0;
@@ -980,10 +937,22 @@ void qmi_rmnet_reset_enabled_flows(struct net_device *dev)
bearer->grant_size = DEFAULT_GRANT;
bearer->grant_thresh =
qmi_rmnet_grant_per(DEFAULT_GRANT);
} else if (num_bearers_left) {
if (bearer_id)
bearer_id[current_num_bearers] =
bearer->bearer_id;
current_num_bearers++;
num_bearers_left--;
} else {
pr_err("DFC: no bearer space\n");
}
}
*num_bearers = current_num_bearers;
spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_reset_enabled_flows);
EXPORT_SYMBOL(qmi_rmnet_prepare_ps_bearers);
#ifdef CONFIG_QTI_QMI_DFC
bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
@@ -1438,25 +1407,19 @@ static void qmi_rmnet_check_stats_2(struct work_struct *work)
}
if (!rxd && !txd) {
rmnet_lock_unlock_all_flows(real_work->port, true);
num_bearers = sizeof(ps_bearer_id);
memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
rmnet_get_disabled_flows(real_work->port, &num_bearers,
ps_bearer_id);
/* Enter powersave */
if (dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id)) {
rmnet_lock_unlock_all_flows(real_work->port, false);
goto end;
}
rmnet_reset_enabled_flows(real_work->port);
qmi->ps_ignore_grant = true;
qmi->ps_enabled = true;
clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
rmnet_lock_unlock_all_flows(real_work->port, false);
smp_mb();
num_bearers = sizeof(ps_bearer_id);
memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
rmnet_prepare_ps_bearers(real_work->port, &num_bearers,
ps_bearer_id);
/* Enter powersave */
dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);
if (rmnet_get_powersave_notif(real_work->port))
qmi_rmnet_ps_on_notify(real_work->port);

查看文件

@@ -33,10 +33,8 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
int attr_len);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock);
void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
u8 *bearer_id);
void qmi_rmnet_reset_enabled_flows(struct net_device *dev);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
@@ -60,19 +58,13 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
return true;
}
static inline void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev,
bool lock)
{
}
static inline void qmi_rmnet_get_disabled_flows(struct net_device *dev,
static inline void qmi_rmnet_prepare_ps_bearers(struct net_device *dev,
u8 *num_bearers, u8 *bearer_id)
{
if (num_bearers)
*num_bearers = 0;
}
static void qmi_rmnet_reset_enabled_flows(struct net_device *dev);
#endif
#ifdef CONFIG_QTI_QMI_DFC

查看文件

@@ -755,24 +755,7 @@ out:
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);
void rmnet_lock_unlock_all_flows(void *port, bool lock)
{
struct rmnet_endpoint *ep;
unsigned long bkt;
if (unlikely(!port))
return;
rcu_read_lock();
hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
bkt, ep, hlnode) {
qmi_rmnet_lock_unlock_all_flows(ep->egress_dev, lock);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_lock_unlock_all_flows);
void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id)
{
struct rmnet_endpoint *ep;
unsigned long bkt;
@@ -789,7 +772,7 @@ void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
bkt, ep, hlnode) {
num_bearers_in_out = number_bearers_left;
qmi_rmnet_get_disabled_flows(ep->egress_dev,
qmi_rmnet_prepare_ps_bearers(ep->egress_dev,
&num_bearers_in_out,
bearer_id ? bearer_id +
current_num_bearers : NULL);
@@ -800,24 +783,7 @@ void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(rmnet_get_disabled_flows);
void rmnet_reset_enabled_flows(void *port)
{
struct rmnet_endpoint *ep;
unsigned long bkt;
if (unlikely(!port))
return;
rcu_read_lock();
hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
bkt, ep, hlnode) {
qmi_rmnet_reset_enabled_flows(ep->egress_dev);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_reset_enabled_flows);
EXPORT_SYMBOL(rmnet_prepare_ps_bearers);
int rmnet_get_powersave_notif(void *port)
{

查看文件

@@ -35,9 +35,7 @@ void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
struct net_device *rmnet_get_real_dev(void *port);
int rmnet_get_dlmarker_info(void *port);
void rmnet_lock_unlock_all_flows(void *port, bool lock);
void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id);
void rmnet_reset_enabled_flows(void *port);
void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{