busy_poll.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>

/*             0 - Reserved to indicate value not set
 *    1..NR_CPUS - Reserved for sender_cpu
 * NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
        return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
        return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
                    bool (*loop_end)(void *, unsigned long),
                    void *loop_end_arg, bool prefer_busy_poll, u16 budget);
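
/*
 * Example (illustrative sketch, not part of this header): a caller drives
 * napi_busy_loop() with its own termination callback, analogous to
 * sk_busy_loop_end(). The callback returns true when the loop should stop.
 * my_loop_end() and struct my_ctx below are hypothetical names.
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		return ctx->data_ready || busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, my_loop_end, ctx, false, BUSY_POLL_BUDGET);
 */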

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
        return false;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
        return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        /* local_clock() is in ns; >> 10 cheaply approximates usec */
        return (unsigned long)(local_clock() >> 10);
#else
        return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

        if (bp_usec) {
                unsigned long end_time = start_time + bp_usec;
                unsigned long now = busy_loop_current_time();

                return time_after(now, end_time);
        }
#endif
        return true;
}
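
/*
 * Example (illustrative): the poll/select core records a start time once,
 * then checks busy_loop_timeout() on every pass, roughly:
 *
 *	unsigned long busy_start = busy_loop_current_time();
 *
 *	for (;;) {
 *		if (events_found || busy_loop_timeout(busy_start))
 *			break;
 *		... busy poll the NAPI contexts ...
 *	}
 *
 * events_found stands in for the caller's own readiness state.
 */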

static inline bool sk_busy_loop_timeout(struct sock *sk,
                                        unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

        if (bp_usec) {
                unsigned long end_time = start_time + bp_usec;
                unsigned long now = busy_loop_current_time();

                return time_after(now, end_time);
        }
#endif
        return true;
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

        if (napi_id >= MIN_NAPI_ID)
                napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
                               READ_ONCE(sk->sk_prefer_busy_poll),
                               READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
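
/*
 * Example (illustrative): a protocol's blocking receive path typically
 * busy polls before sleeping, along the lines of what tcp_recvmsg() does:
 *
 *	if (sk_can_busy_loop(sk) &&
 *	    skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, nonblock);
 *
 * i.e. spin on the device's NAPI context only while no data is queued
 * and busy polling is enabled for this socket.
 */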

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
                                    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        /* If the skb was already marked with a valid NAPI ID, avoid
         * overwriting it.
         */
        if (skb->napi_id < MIN_NAPI_ID)
                skb->napi_id = napi->napi_id;
#endif
}
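
/*
 * Example (illustrative): a driver's NAPI poll routine can mark each skb
 * before handing it up the stack:
 *
 *	skb_mark_napi_id(skb, napi);
 *	netif_receive_skb(skb);
 *
 * Drivers delivering through napi_gro_receive() get the marking for free,
 * as that path calls skb_mark_napi_id() itself.
 */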

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
                WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
        sk_rx_queue_update(sk, skb);
}

/* Variant of sk_mark_napi_id() for passive flow setup,
 * where both sk->sk_napi_id and sk->sk_rx_queue_mapping
 * need to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
                                       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
        sk_rx_queue_set(sk, skb);
}

static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        if (!READ_ONCE(sk->sk_napi_id))
                WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
                                        const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        __sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}
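
/*
 * Example (illustrative): unconnected sockets (e.g. UDP) may see traffic
 * from several receive queues, so only the first packet pins the NAPI ID:
 *
 *	sk_mark_napi_id_once(sk, skb);	/\* in the datagram receive path *\/
 *
 * Packets arriving later via other queues leave sk->sk_napi_id untouched,
 * keeping the busy poll target stable.
 */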

static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
                                            const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}
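
/*
 * Example (illustrative): AF_XDP sockets use the xdp_buff variant, since
 * frames arrive as xdp_buffs rather than skbs and the NAPI ID is carried
 * in the buffer's Rx queue info; xs below is an assumed struct xdp_sock
 * pointer in the XDP socket Rx path:
 *
 *	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
 */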

#endif /* _LINUX_NET_BUSY_POLL_H */