tcp_rate.c
// SPDX-License-Identifier: GPL-2.0-only
#include <net/tcp.h>

/* The bandwidth estimator estimates the rate at which the network
 * can currently deliver outbound data packets for this flow. At a high
 * level, it operates by taking a delivery rate sample for each ACK.
 *
 * A rate sample records the rate at which the network delivered packets
 * for this flow, calculated over the time interval between the transmission
 * of a data packet and the acknowledgment of that packet.
 *
 * Specifically, over the interval between each transmit and corresponding ACK,
 * the estimator generates a delivery rate sample. Typically it uses the rate
 * at which packets were acknowledged. However, the approach of using only the
 * acknowledgment rate faces a challenge under the prevalent ACK decimation or
 * compression: packets can temporarily appear to be delivered much quicker
 * than the bottleneck rate. Since it is physically impossible to do that in a
 * sustained fashion, when the estimator notices that the ACK rate is faster
 * than the transmit rate, it uses the latter:
 *
 *    send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
 *    ack_rate  = #pkts_delivered/(last_ack_time - first_ack_time)
 *    bw = min(send_rate, ack_rate)
 *
 * Notice the estimator essentially estimates the goodput, not always the
 * network bottleneck link rate when the sending or receiving is limited by
 * other factors like applications or receiver window limits. The estimator
 * deliberately avoids using the inter-packet spacing approach because that
 * approach requires a large number of samples and sophisticated filtering.
 *
 * TCP flows can often be application-limited in request/response workloads.
 * The estimator marks a bandwidth sample as application-limited if there
 * was some moment during the sampled window of packets when there was no data
 * ready to send in the write queue.
 */
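
/* Illustrative sketch, not part of the upstream file: how the
 * bw = min(send_rate, ack_rate) rule above might look if the two rates
 * were computed directly in packets per second. The helper name and the
 * plain pkts/sec units are assumptions made for this example; the real
 * code below keeps bandwidth as the ratio delivered/interval_us instead.
 */
static u64 __maybe_unused tcp_rate_example_min_bw(u32 delivered,
						  u32 snd_interval_us,
						  u32 ack_interval_us)
{
	u64 send_rate, ack_rate;

	if (!snd_interval_us || !ack_interval_us)
		return 0;	/* no timing information: no estimate */

	/* send_rate = #pkts_delivered / (last_snd_time - first_snd_time) */
	send_rate = div_u64((u64)delivered * USEC_PER_SEC, snd_interval_us);
	/* ack_rate = #pkts_delivered / (last_ack_time - first_ack_time) */
	ack_rate = div_u64((u64)delivered * USEC_PER_SEC, ack_interval_us);

	/* ACK compression can make ack_rate transiently exceed the
	 * bottleneck rate, so trust the slower of the two phases.
	 */
	return min(send_rate, ack_rate);
}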

/* Snapshot the current delivery information in the skb, to generate
 * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
 */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* In general we need to start delivery rate samples from the
	 * time we received the most recent ACK, to ensure we include
	 * the full time the network needs to deliver all in-flight
	 * packets. If there are no packets in flight yet, then we
	 * know that any ACKs after now indicate that the network was
	 * able to deliver those packets completely in the sampling
	 * interval between now and the next ACK.
	 *
	 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
	 * because the latter is a guess based on RTO and loss-marking
	 * heuristics. We don't want spurious RTOs or loss markings to cause
	 * a spuriously small time interval, causing a spuriously high
	 * bandwidth estimate.
	 */
	if (!tp->packets_out) {
		u64 tstamp_us = tcp_skb_timestamp_us(skb);

		tp->first_tx_mstamp  = tstamp_us;
		tp->delivered_mstamp = tstamp_us;
	}

	TCP_SKB_CB(skb)->tx.first_tx_mstamp	= tp->first_tx_mstamp;
	TCP_SKB_CB(skb)->tx.delivered_mstamp	= tp->delivered_mstamp;
	TCP_SKB_CB(skb)->tx.delivered		= tp->delivered;
	TCP_SKB_CB(skb)->tx.delivered_ce	= tp->delivered_ce;
	TCP_SKB_CB(skb)->tx.is_app_limited	= tp->app_limited ? 1 : 0;
}

/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
 * delivery information when the skb was last transmitted.
 *
 * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
 * called multiple times. We favor the information from the most recently
 * sent skb, i.e., the skb with the most recently sent time and the highest
 * sequence.
 */
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
	u64 tx_tstamp;

	if (!scb->tx.delivered_mstamp)
		return;

	tx_tstamp = tcp_skb_timestamp_us(skb);
	if (!rs->prior_delivered ||
	    tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
			       scb->end_seq, rs->last_end_seq)) {
		rs->prior_delivered_ce	= scb->tx.delivered_ce;
		rs->prior_delivered	= scb->tx.delivered;
		rs->prior_mstamp	= scb->tx.delivered_mstamp;
		rs->is_app_limited	= scb->tx.is_app_limited;
		rs->is_retrans		= scb->sacked & TCPCB_RETRANS;
		rs->last_end_seq	= scb->end_seq;

		/* Record send time of most recently ACKed packet: */
		tp->first_tx_mstamp	= tx_tstamp;
		/* Find the duration of the "send phase" of this window: */
		rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
						     scb->tx.first_tx_mstamp);
	}

	/* Mark off the skb delivered once it's sacked to avoid being
	 * used again when it's cumulatively acked. For acked packets
	 * we don't need to reset since it'll be freed soon.
	 */
	if (scb->sacked & TCPCB_SACKED_ACKED)
		scb->tx.delivered_mstamp = 0;
}
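
/* Illustrative sketch, not part of the upstream file: the "favor the most
 * recently sent skb" rule above, applied to two per-skb snapshots covered
 * by a single stretch ACK. The struct and helper are hypothetical and
 * exist only to mirror the tcp_skb_sent_after() test used above.
 */
struct tcp_rate_example_snap {
	u64 tx_tstamp;		/* send time of the (s)acked skb */
	u32 end_seq;		/* highest sequence carried by the skb */
};

static const struct tcp_rate_example_snap * __maybe_unused
tcp_rate_example_pick(const struct tcp_rate_example_snap *a,
		      const struct tcp_rate_example_snap *b)
{
	/* Later send time wins; a tie breaks on the higher sequence,
	 * matching tcp_skb_sent_after(t1, t2, seq1, seq2).
	 */
	if (a->tx_tstamp > b->tx_tstamp ||
	    (a->tx_tstamp == b->tx_tstamp && after(a->end_seq, b->end_seq)))
		return a;
	return b;
}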

/* Update the connection delivery information and generate a rate sample. */
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 snd_us, ack_us;

	/* Clear app limited if bubble is acked and gone. */
	if (tp->app_limited && after(tp->delivered, tp->app_limited))
		tp->app_limited = 0;

	/* TODO: there are multiple places throughout tcp_ack() to get
	 * current time. Refactor the code using a new "tcp_acktag_state"
	 * to carry current time, flags, stats like "tcp_sacktag_state".
	 */
	if (delivered)
		tp->delivered_mstamp = tp->tcp_mstamp;

	rs->acked_sacked = delivered;	/* freshly ACKed or SACKed */
	rs->losses = lost;		/* freshly marked lost */
	/* Return an invalid sample if no timing information is available or
	 * in recovery from loss with SACK reneging. Rate samples taken during
	 * a SACK reneging event may overestimate bw by including packets that
	 * were SACKed before the reneg.
	 */
	if (!rs->prior_mstamp || is_sack_reneg) {
		rs->delivered = -1;
		rs->interval_us = -1;
		return;
	}
	rs->delivered    = tp->delivered - rs->prior_delivered;
	rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
	/* delivered_ce occupies less than 32 bits in the skb control block */
	rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;

	/* Model sending data and receiving ACKs as separate pipeline phases
	 * for a window. Usually the ACK phase is longer, but with ACK
	 * compression the send phase can be longer. To be safe we use the
	 * longer phase.
	 */
	snd_us = rs->interval_us;			/* send phase */
	ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
				    rs->prior_mstamp);	/* ack phase */
	rs->interval_us = max(snd_us, ack_us);

	/* Record both segment send and ack receive intervals */
	rs->snd_interval_us = snd_us;
	rs->rcv_interval_us = ack_us;

	/* Normally we expect interval_us >= min-rtt.
	 * Note that the rate may still be over-estimated when a spuriously
	 * retransmitted skb was first (s)acked, because "interval_us"
	 * is under-estimated (by up to an RTT). However, continuously
	 * measuring the delivery rate during loss recovery is crucial
	 * for connections that suffer heavy or prolonged losses.
	 */
	if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
		if (!rs->is_retrans)
			pr_debug("tcp rate: %ld %d %u %u %u\n",
				 rs->interval_us, rs->delivered,
				 inet_csk(sk)->icsk_ca_state,
				 tp->rx_opt.sack_ok, tcp_min_rtt(tp));
		rs->interval_us = -1;
		return;
	}

	/* Record the last non-app-limited or the highest app-limited bw */
	if (!rs->is_app_limited ||
	    ((u64)rs->delivered * tp->rate_interval_us >=
	     (u64)tp->rate_delivered * rs->interval_us)) {
		tp->rate_delivered = rs->delivered;
		tp->rate_interval_us = rs->interval_us;
		tp->rate_app_limited = rs->is_app_limited;
	}
}
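
/* Illustrative sketch, not part of the upstream file: how a congestion
 * control module might turn a finished rate sample into a bandwidth
 * estimate. The helper name and plain pkts/sec scaling are assumptions;
 * BBR, for instance, uses its own fixed-point scaling. Samples that
 * tcp_rate_gen() marked invalid (delivered or interval_us set to -1)
 * must be skipped.
 */
static u64 __maybe_unused
tcp_rate_example_sample_bw(const struct rate_sample *rs)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample: nothing to learn */

	return div_u64((u64)rs->delivered * USEC_PER_SEC, rs->interval_us);
}

/* Note the cross-multiplication in tcp_rate_gen() above: comparing
 * rs->delivered * tp->rate_interval_us against
 * tp->rate_delivered * rs->interval_us ranks the two bandwidths without
 * performing a division in the ACK fast path.
 */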

/* If a gap is detected between sends, mark the socket application-limited. */
void tcp_rate_check_app_limited(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (/* We have less than one packet to send. */
	    tp->write_seq - tp->snd_nxt < tp->mss_cache &&
	    /* Nothing in sending host's qdisc queues or NIC tx queue. */
	    sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
	    /* We are not limited by CWND. */
	    tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) &&
	    /* All lost packets have been retransmitted. */
	    tp->lost_out <= tp->retrans_out)
		/* Mark the delivered count at which the app-limited phase
		 * ends; "?: 1" keeps the marker nonzero, since 0 means
		 * "not app-limited".
		 */
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
}
EXPORT_SYMBOL_GPL(tcp_rate_check_app_limited);
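
/* Usage sketch, an assumption modeled on the kernel send path: callers
 * such as tcp_sendmsg() invoke tcp_rate_check_app_limited() before
 * queueing fresh data, so a momentarily empty write queue is recorded
 * as an app-limited phase. The wrapper below is hypothetical.
 */
static void __maybe_unused tcp_rate_example_send(struct sock *sk)
{
	tcp_rate_check_app_limited(sk);	/* snapshot app-limited state */

	/* ... queue and transmit new data for sk ... */
}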