[NETLINK]: Switch cb_lock spinlock to mutex and allow to override it
Switch cb_lock to a mutex and allow netlink kernel users to override it
with a subsystem-specific mutex, giving consistent locking in dump
callbacks. All netlink_dump_start users have been audited not to rely on
any side-effects of the previously used spinlock.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

committed by David S. Miller

parent b076deb849
commit af65bdfce9
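
As a quick illustration of the new interface (not part of the patch itself): after this change netlink_kernel_create() takes an extra cb_mutex argument, and passing NULL keeps the old per-socket behaviour via cb_def_mutex. The sketch below assumes a hypothetical subsystem; NETLINK_MYSUBSYS, my_subsys_mutex, my_input and my_subsys_init are invented names used only for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netlink.h>
#include <net/sock.h>

#define NETLINK_MYSUBSYS 31	/* hypothetical protocol number, < MAX_LINKS */

static DEFINE_MUTEX(my_subsys_mutex);	/* protects the subsystem's state */

static void my_input(struct sock *sk, int len)
{
	/* process queued netlink requests; details omitted */
}

static int __init my_subsys_init(void)
{
	struct sock *nlsk;

	/* Passing &my_subsys_mutex makes netlink_dump_start()/netlink_dump()
	 * run this socket's dump callbacks under that mutex instead of the
	 * per-socket default (cb_def_mutex), so dumps see the same locking
	 * as the rest of the subsystem. */
	nlsk = netlink_kernel_create(NETLINK_MYSUBSYS, 0, my_input,
				     &my_subsys_mutex, THIS_MODULE);
	return nlsk ? 0 : -ENOMEM;
}
module_init(my_subsys_init);
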
@@ -56,6 +56,7 @@
 #include <linux/types.h>
 #include <linux/audit.h>
 #include <linux/selinux.h>
+#include <linux/mutex.h>
 
 #include <net/sock.h>
 #include <net/scm.h>
@@ -76,7 +77,8 @@ struct netlink_sock {
 	unsigned long		state;
 	wait_queue_head_t	wait;
 	struct netlink_callback	*cb;
-	spinlock_t		cb_lock;
+	struct mutex		*cb_mutex;
+	struct mutex		cb_def_mutex;
 	void			(*data_ready)(struct sock *sk, int bytes);
 	struct module		*module;
 };
@@ -108,6 +110,7 @@ struct netlink_table {
 	unsigned long *listeners;
 	unsigned int nl_nonroot;
 	unsigned int groups;
+	struct mutex *cb_mutex;
 	struct module *module;
 	int registered;
 };
@@ -370,7 +373,8 @@ static struct proto netlink_proto = {
 	.obj_size = sizeof(struct netlink_sock),
 };
 
-static int __netlink_create(struct socket *sock, int protocol)
+static int __netlink_create(struct socket *sock, struct mutex *cb_mutex,
+			    int protocol)
 {
 	struct sock *sk;
 	struct netlink_sock *nlk;
@@ -384,7 +388,8 @@ static int __netlink_create(struct socket *sock, int protocol)
 	sock_init_data(sock, sk);
 
 	nlk = nlk_sk(sk);
-	spin_lock_init(&nlk->cb_lock);
+	nlk->cb_mutex = cb_mutex ? : &nlk->cb_def_mutex;
+	mutex_init(nlk->cb_mutex);
 	init_waitqueue_head(&nlk->wait);
 
 	sk->sk_destruct = netlink_sock_destruct;
@@ -395,6 +400,7 @@ static int __netlink_create(struct socket *sock, int protocol)
 static int netlink_create(struct socket *sock, int protocol)
 {
 	struct module *module = NULL;
+	struct mutex *cb_mutex;
 	struct netlink_sock *nlk;
 	int err = 0;
 
@@ -417,9 +423,10 @@ static int netlink_create(struct socket *sock, int protocol)
 	if (nl_table[protocol].registered &&
 	    try_module_get(nl_table[protocol].module))
 		module = nl_table[protocol].module;
+	cb_mutex = nl_table[protocol].cb_mutex;
 	netlink_unlock_table();
 
-	if ((err = __netlink_create(sock, protocol)) < 0)
+	if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0)
 		goto out_module;
 
 	nlk = nlk_sk(sock->sk);
@@ -444,14 +451,14 @@ static int netlink_release(struct socket *sock)
 	sock_orphan(sk);
 	nlk = nlk_sk(sk);
 
-	spin_lock(&nlk->cb_lock);
+	mutex_lock(nlk->cb_mutex);
 	if (nlk->cb) {
 		if (nlk->cb->done)
 			nlk->cb->done(nlk->cb);
 		netlink_destroy_callback(nlk->cb);
 		nlk->cb = NULL;
 	}
-	spin_unlock(&nlk->cb_lock);
+	mutex_unlock(nlk->cb_mutex);
 
 	/* OK. Socket is unlinked, and, therefore,
 	   no new packets will arrive */
@@ -1266,7 +1273,7 @@ static void netlink_data_ready(struct sock *sk, int len)
 struct sock *
 netlink_kernel_create(int unit, unsigned int groups,
 		      void (*input)(struct sock *sk, int len),
-		      struct module *module)
+		      struct mutex *cb_mutex, struct module *module)
 {
 	struct socket *sock;
 	struct sock *sk;
@@ -1281,7 +1288,7 @@ netlink_kernel_create(int unit, unsigned int groups,
 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
 		return NULL;
 
-	if (__netlink_create(sock, unit) < 0)
+	if (__netlink_create(sock, cb_mutex, unit) < 0)
 		goto out_sock_release;
 
 	if (groups < 32)
@@ -1305,6 +1312,7 @@ netlink_kernel_create(int unit, unsigned int groups,
 	netlink_table_grab();
 	nl_table[unit].groups = groups;
 	nl_table[unit].listeners = listeners;
+	nl_table[unit].cb_mutex = cb_mutex;
 	nl_table[unit].module = module;
 	nl_table[unit].registered = 1;
 	netlink_table_ungrab();
@@ -1347,7 +1355,7 @@ static int netlink_dump(struct sock *sk)
 	if (!skb)
 		goto errout;
 
-	spin_lock(&nlk->cb_lock);
+	mutex_lock(nlk->cb_mutex);
 
 	cb = nlk->cb;
 	if (cb == NULL) {
@@ -1358,7 +1366,7 @@ static int netlink_dump(struct sock *sk)
 	len = cb->dump(skb, cb);
 
 	if (len > 0) {
-		spin_unlock(&nlk->cb_lock);
+		mutex_unlock(nlk->cb_mutex);
 		skb_queue_tail(&sk->sk_receive_queue, skb);
 		sk->sk_data_ready(sk, len);
 		return 0;
@@ -1376,13 +1384,13 @@ static int netlink_dump(struct sock *sk)
 	if (cb->done)
 		cb->done(cb);
 	nlk->cb = NULL;
-	spin_unlock(&nlk->cb_lock);
+	mutex_unlock(nlk->cb_mutex);
 
 	netlink_destroy_callback(cb);
 	return 0;
 
 errout_skb:
-	spin_unlock(&nlk->cb_lock);
+	mutex_unlock(nlk->cb_mutex);
 	kfree_skb(skb);
 errout:
 	return err;
@@ -1414,15 +1422,15 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	}
 	nlk = nlk_sk(sk);
 	/* A dump or destruction is in progress... */
-	spin_lock(&nlk->cb_lock);
+	mutex_lock(nlk->cb_mutex);
 	if (nlk->cb || sock_flag(sk, SOCK_DEAD)) {
-		spin_unlock(&nlk->cb_lock);
+		mutex_unlock(nlk->cb_mutex);
 		netlink_destroy_callback(cb);
 		sock_put(sk);
 		return -EBUSY;
 	}
 	nlk->cb = cb;
-	spin_unlock(&nlk->cb_lock);
+	mutex_unlock(nlk->cb_mutex);
 
 	netlink_dump(sk);
 	sock_put(sk);
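
To round out the picture (again illustrative, not from the patch): a dump callback started through netlink_dump_start() on a socket created as above is now invoked by netlink_dump() with the subsystem mutex held, so it needs no locking of its own. struct my_obj, my_obj_list, MY_MSG_TYPE and my_dump are hypothetical, and the sketch assumes the my_subsys_mutex socket from the earlier example.

#include <linux/list.h>
#include <linux/netlink.h>
#include <net/netlink.h>

#define MY_MSG_TYPE 0x10	/* hypothetical message type (>= NLMSG_MIN_TYPE) */

struct my_obj {			/* hypothetical subsystem object */
	struct list_head list;
	u32 id;
};
static LIST_HEAD(my_obj_list);	/* protected by my_subsys_mutex */

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_obj *obj;
	int idx = 0, start = cb->args[0];

	/* netlink_dump() already holds nlk->cb_mutex, which is
	 * my_subsys_mutex for this socket, so the list can be walked
	 * without taking any additional lock here. */
	list_for_each_entry(obj, &my_obj_list, list) {
		struct nlmsghdr *nlh;

		if (idx < start)
			goto next;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, MY_MSG_TYPE,
				sizeof(u32), NLM_F_MULTI);
		if (nlh == NULL)
			break;		/* skb full: resume at idx next time */
		*(u32 *)nlmsg_data(nlh) = obj->id;
next:
		idx++;
	}
	cb->args[0] = idx;
	return skb->len;
}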