unix_bpf.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Cong Wang <[email protected]> */

#include <linux/skmsg.h>
#include <linux/bpf.h>
#include <net/sock.h>
#include <net/af_unix.h>
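
/*
 * True if there is anything to receive: data on the socket's own receive
 * queue, or skbs/msgs queued on the psock by a BPF verdict/redirect.
 */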
#define unix_sk_has_data(__sk, __psock)				\
        ({	!skb_queue_empty(&__sk->sk_receive_queue) ||	\
                !skb_queue_empty(&__psock->ingress_skb) ||	\
                !list_empty(&__psock->ingress_msg);		\
        })
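
/*
 * Wait for data to arrive on either the socket or the psock queues,
 * dropping u->iolock around the sleep.  Returns 1 if the receive side
 * is shut down, 0 for a non-blocking call, and otherwise whether data
 * was found once we were woken.
 */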
static int unix_msg_wait_data(struct sock *sk, struct sk_psock *psock,
                              long timeo)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct unix_sock *u = unix_sk(sk);
        int ret = 0;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return 1;

        if (!timeo)
                return ret;

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        if (!unix_sk_has_data(sk, psock)) {
                mutex_unlock(&u->iolock);
                wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                mutex_lock(&u->iolock);
                ret = unix_sk_has_data(sk, psock);
        }
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
        return ret;
}
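
/* Fall back to the regular af_unix receive path for this socket type. */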
static int __unix_recvmsg(struct sock *sk, struct msghdr *msg,
                          size_t len, int flags)
{
        if (sk->sk_type == SOCK_DGRAM)
                return __unix_dgram_recvmsg(sk, msg, len, flags);
        else
                return __unix_stream_recvmsg(sk, msg, len, flags);
}
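
/*
 * recvmsg callback used while the socket sits in a sockmap: consume data
 * that BPF redirected onto the psock, and fall back to the plain receive
 * path when no psock is attached or only sk_receive_queue has data.
 */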
static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
                            size_t len, int flags, int *addr_len)
{
        struct unix_sock *u = unix_sk(sk);
        struct sk_psock *psock;
        int copied;

        if (!len)
                return 0;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return __unix_recvmsg(sk, msg, len, flags);

        mutex_lock(&u->iolock);
        if (!skb_queue_empty(&sk->sk_receive_queue) &&
            sk_psock_queue_empty(psock)) {
                mutex_unlock(&u->iolock);
                sk_psock_put(sk, psock);
                return __unix_recvmsg(sk, msg, len, flags);
        }

msg_bytes_ready:
        copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
        if (!copied) {
                long timeo;
                int data;

                timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
                data = unix_msg_wait_data(sk, psock, timeo);
                if (data) {
                        if (!sk_psock_queue_empty(psock))
                                goto msg_bytes_ready;
                        mutex_unlock(&u->iolock);
                        sk_psock_put(sk, psock);
                        return __unix_recvmsg(sk, msg, len, flags);
                }
                copied = -EAGAIN;
        }
        mutex_unlock(&u->iolock);
        sk_psock_put(sk, psock);
        return copied;
}
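
/*
 * One saved pointer to the original proto and one BPF-flavoured copy per
 * socket type, with a spinlock guarding the (rare) rebuild.
 */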
static struct proto *unix_dgram_prot_saved __read_mostly;
static DEFINE_SPINLOCK(unix_dgram_prot_lock);
static struct proto unix_dgram_bpf_prot;

static struct proto *unix_stream_prot_saved __read_mostly;
static DEFINE_SPINLOCK(unix_stream_prot_lock);
static struct proto unix_stream_bpf_prot;
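
/*
 * Clone the base proto and override the callbacks sockmap needs; the
 * stream flavour additionally hooks unhash.
 */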
static void unix_dgram_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
{
        *prot = *base;
        prot->close = sock_map_close;
        prot->recvmsg = unix_bpf_recvmsg;
        prot->sock_is_readable = sk_msg_is_readable;
}

static void unix_stream_bpf_rebuild_protos(struct proto *prot,
                                           const struct proto *base)
{
        *prot = *base;
        prot->close = sock_map_close;
        prot->recvmsg = unix_bpf_recvmsg;
        prot->sock_is_readable = sk_msg_is_readable;
        prot->unhash = sock_map_unhash;
}
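
/*
 * Rebuild the BPF proto copy only when the base proto has changed,
 * using double-checked locking: a lockless acquire-load on the fast
 * path, the spinlock plus a release-store on the slow path.
 */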
static void unix_dgram_bpf_check_needs_rebuild(struct proto *ops)
{
        if (unlikely(ops != smp_load_acquire(&unix_dgram_prot_saved))) {
                spin_lock_bh(&unix_dgram_prot_lock);
                if (likely(ops != unix_dgram_prot_saved)) {
                        unix_dgram_bpf_rebuild_protos(&unix_dgram_bpf_prot, ops);
                        smp_store_release(&unix_dgram_prot_saved, ops);
                }
                spin_unlock_bh(&unix_dgram_prot_lock);
        }
}

static void unix_stream_bpf_check_needs_rebuild(struct proto *ops)
{
        if (unlikely(ops != smp_load_acquire(&unix_stream_prot_saved))) {
                spin_lock_bh(&unix_stream_prot_lock);
                if (likely(ops != unix_stream_prot_saved)) {
                        unix_stream_bpf_rebuild_protos(&unix_stream_bpf_prot, ops);
                        smp_store_release(&unix_stream_prot_saved, ops);
                }
                spin_unlock_bh(&unix_stream_prot_lock);
        }
}
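
/*
 * Called by sockmap when the socket is added to or removed from a map:
 * swap in the BPF proto, or restore the socket's original callbacks.
 */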
int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
        if (sk->sk_type != SOCK_DGRAM)
                return -EOPNOTSUPP;

        if (restore) {
                sk->sk_write_space = psock->saved_write_space;
                sock_replace_proto(sk, psock->sk_proto);
                return 0;
        }

        unix_dgram_bpf_check_needs_rebuild(psock->sk_proto);
        sock_replace_proto(sk, &unix_dgram_bpf_prot);
        return 0;
}

int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
        if (restore) {
                sk->sk_write_space = psock->saved_write_space;
                sock_replace_proto(sk, psock->sk_proto);
                return 0;
        }

        unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
        sock_replace_proto(sk, &unix_stream_bpf_prot);
        return 0;
}
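
/* Build both BPF proto copies from the vanilla af_unix protos at init time. */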
void __init unix_bpf_build_proto(void)
{
        unix_dgram_bpf_rebuild_protos(&unix_dgram_bpf_prot, &unix_dgram_proto);
        unix_stream_bpf_rebuild_protos(&unix_stream_bpf_prot, &unix_stream_proto);
}
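
/*
 * Illustrative user-space sketch (assumed fd names, not part of this file):
 * the BPF protos above take effect once an AF_UNIX socket is placed into
 * a sockmap, e.g. with libbpf:
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,
 *				    sizeof(int), sizeof(int), 1, NULL);
 *	int key = 0;
 *	bpf_map_update_elem(map_fd, &key, &unix_sock_fd, BPF_ANY);
 *
 * From then on, recvmsg() on that socket goes through unix_bpf_recvmsg()
 * until the socket is removed from the map or closed.
 */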