diag.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>
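
/* Attach the socket's bound address (filesystem path or abstract name) as
 * a UNIX_DIAG_NAME attribute.  The acquire is presumably paired with the
 * store-release that publishes the address at bind time, so the read is
 * safe with or without the hash table lock; unbound sockets simply
 * contribute no attribute.
 */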
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have a hash table lock */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME,
		       addr->len - offsetof(struct sockaddr_un, sun_path),
		       addr->name->sun_path);
}
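
/* For a filesystem-bound socket, report the backing inode number and
 * device as a UNIX_DIAG_VFS attribute; abstract and unbound sockets have
 * no dentry and contribute nothing.
 */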
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}
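
/* Report the inode number of the connected peer, if any, as a
 * UNIX_DIAG_PEER attribute.  unix_peer_get() takes a reference so the
 * peer cannot go away while its inode is read under its state lock.
 */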
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}
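
/* For a listening socket, emit one UNIX_DIAG_ICONS array entry per
 * embryonic connection sitting in the accept queue: each u32 is the inode
 * number of the connecting peer, or 0 if that peer is already gone.
 */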
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req, U_LOCK_DIAG);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
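
/* Fill UNIX_DIAG_RQLEN.  For a listener this reports the current and
 * maximum accept backlog; otherwise the queued incoming data and the
 * not-yet-consumed outgoing data, along the lines of SIOCINQ/SIOCOUTQ.
 */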
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
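
/* Report the socket owner's UID, translated into the user namespace of
 * the process that issued the query.
 */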
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
			    struct user_namespace *user_ns)
{
	uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));

	return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
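
/* Core reply builder: start a SOCK_DIAG_BY_FAMILY message, fill the fixed
 * unix_diag_msg header, then append each attribute the request selected
 * via udiag_show.  On any overflow the partial message is cancelled and
 * -EMSGSIZE returned so the caller can retry with more room.
 */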
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb, user_ns))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
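
/* Dump one socket during an iteration.  sock_i_ino() yields 0 for a
 * socket that has no VFS inode attached yet, so such half-initialized
 * sockets are skipped rather than reported.
 */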
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}
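
/* Netlink dump callback.  Walks the per-netns hash table bucket by bucket
 * under the per-bucket lock, filtering on the requested state mask;
 * cb->args[0]/args[1] record the current slot and index so a filled skb
 * can be resumed exactly where it left off.
 */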
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;
	struct unix_diag_req *req;

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		spin_lock(&net->unx.table.locks[slot]);
		sk_for_each(sk, &net->unx.table.buckets[slot]) {
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0) {
				spin_unlock(&net->unx.table.locks[slot]);
				goto done;
			}
next:
			num++;
		}
		spin_unlock(&net->unx.table.locks[slot]);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
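
/* Linear search of the whole hash table for the socket with the given
 * inode number (the table is keyed by address hash, not by inode).
 * Returns the socket with a reference held, or NULL.
 */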
static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
{
	struct sock *sk;
	int i;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock(&net->unx.table.locks[i]);
		sk_for_each(sk, &net->unx.table.buckets[i]) {
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&net->unx.table.locks[i]);
				return sk;
			}
		}
		spin_unlock(&net->unx.table.locks[i]);
	}

	return NULL;
}
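
/* Answer a non-dump request for a single socket identified by inode (and
 * validated against the request cookie).  The reply buffer starts small
 * and is regrown in 256-byte steps, capped at PAGE_SIZE, until
 * sk_diag_fill() fits.
 */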
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	struct net *net = sock_net(in_skb->sk);
	unsigned int extra_len;
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(net, req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}

	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
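
/* Entry point from the sock_diag core for AF_UNIX requests: dump requests
 * go through the netlink dump machinery, everything else is treated as an
 * exact single-socket query.
 */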
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}
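
/* Module glue: register with sock_diag so AF_UNIX queries (as issued by
 * e.g. ss(8)) reach the handler above; the module alias lets the diag
 * core load this module on demand for family 1 (AF_LOCAL).
 */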
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);