/* net/xdp/xsk_diag.c */
// SPDX-License-Identifier: GPL-2.0
/* XDP sockets monitoring support
 *
 * Copyright(c) 2019 Intel Corporation.
 *
 * Author: Björn Töpel <[email protected]>
 */
  8. #include <linux/module.h>
  9. #include <net/xdp_sock.h>
  10. #include <linux/xdp_diag.h>
  11. #include <linux/sock_diag.h>
  12. #include "xsk_queue.h"
  13. #include "xsk.h"
  14. static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
  15. {
  16. struct xdp_diag_info di = {};
  17. di.ifindex = xs->dev ? xs->dev->ifindex : 0;
  18. di.queue_id = xs->queue_id;
  19. return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
  20. }
  21. static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
  22. struct sk_buff *nlskb)
  23. {
  24. struct xdp_diag_ring dr = {};
  25. dr.entries = queue->nentries;
  26. return nla_put(nlskb, nl_type, sizeof(dr), &dr);
  27. }
  28. static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
  29. struct sk_buff *nlskb)
  30. {
  31. int err = 0;
  32. if (xs->rx)
  33. err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
  34. if (!err && xs->tx)
  35. err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
  36. return err;
  37. }
  38. static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
  39. {
  40. struct xsk_buff_pool *pool = xs->pool;
  41. struct xdp_umem *umem = xs->umem;
  42. struct xdp_diag_umem du = {};
  43. int err;
  44. if (!umem)
  45. return 0;
  46. du.id = umem->id;
  47. du.size = umem->size;
  48. du.num_pages = umem->npgs;
  49. du.chunk_size = umem->chunk_size;
  50. du.headroom = umem->headroom;
  51. du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
  52. du.queue_id = pool ? pool->queue_id : 0;
  53. du.flags = 0;
  54. if (umem->zc)
  55. du.flags |= XDP_DU_F_ZEROCOPY;
  56. du.refs = refcount_read(&umem->users);
  57. err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
  58. if (!err && pool && pool->fq)
  59. err = xsk_diag_put_ring(pool->fq,
  60. XDP_DIAG_UMEM_FILL_RING, nlskb);
  61. if (!err && pool && pool->cq)
  62. err = xsk_diag_put_ring(pool->cq,
  63. XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
  64. return err;
  65. }
  66. static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
  67. {
  68. struct xdp_diag_stats du = {};
  69. du.n_rx_dropped = xs->rx_dropped;
  70. du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
  71. du.n_rx_full = xs->rx_queue_full;
  72. du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
  73. du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
  74. du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
  75. return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
  76. }
/* Fill one netlink diag message describing @sk.
 *
 * Emits a struct xdp_diag_msg header followed by the attributes
 * selected by req->xdiag_show.  xs->mutex is held across all attribute
 * reads; an unbound socket is trimmed out early and reported as
 * -EMSGSIZE like an out-of-room skb.
 *
 * Returns 0 on success, -EMSGSIZE when the message could not be
 * completed (the partial message is cancelled).
 */
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	/* xs->state may change concurrently, hence READ_ONCE */
	if (READ_ONCE(xs->state) == XSK_UNBOUND)
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	/* the UID attribute is also gated by XDP_SHOW_INFO */
	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_STATS) &&
	    xsk_diag_put_stats(xs, nlskb))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}
/* Netlink dump callback: walk every AF_XDP socket in this netns and
 * emit one diag message per socket.  cb->args[0] is the resume cursor:
 * the number of sockets already dumped in previous invocations.
 */
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
{
	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
	struct net *net = sock_net(nlskb->sk);
	int num = 0, s_num = cb->args[0];
	struct sock *sk;

	/* serializes walking net->xdp.list */
	mutex_lock(&net->xdp.lock);

	sk_for_each(sk, &net->xdp.list) {
		if (!net_eq(sock_net(sk), net))
			continue;
		/* skip entries emitted by earlier invocations */
		if (num++ < s_num)
			continue;

		if (xsk_diag_fill(sk, nlskb, req,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  sock_i_ino(sk)) < 0) {
			/* skb full: roll the cursor back so this socket
			 * is retried on the next invocation
			 */
			num--;
			break;
		}
	}

	mutex_unlock(&net->xdp.lock);
	cb->args[0] = num;
	return nlskb->len;
}
  149. static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
  150. {
  151. struct netlink_dump_control c = { .dump = xsk_diag_dump };
  152. int hdrlen = sizeof(struct xdp_diag_req);
  153. struct net *net = sock_net(nlskb->sk);
  154. if (nlmsg_len(hdr) < hdrlen)
  155. return -EINVAL;
  156. if (!(hdr->nlmsg_flags & NLM_F_DUMP))
  157. return -EOPNOTSUPP;
  158. return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
  159. }
/* sock_diag handler for the AF_XDP family; dump-only */
static const struct sock_diag_handler xsk_diag_handler = {
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};
/* Module init: register the AF_XDP handler with the sock_diag core. */
static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}
/* Module exit: unregister the AF_XDP sock_diag handler. */
static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}
module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module for NETLINK_SOCK_DIAG requests targeting AF_XDP */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);