TOMOYO Linux Cross Reference
Linux/net/ipv4/tcp_recovery.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->reord_seen) {
                /* If reordering has not been observed, be aggressive
                 * during recovery, or when starting recovery via the
                 * DUPACK threshold.
                 */
                if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
                        return 0;

                if (tp->sacked_out >= tp->reordering &&
                    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
                      TCP_RACK_NO_DUPTHRESH))
                        return 0;
        }

        /* To be more reordering resilient, allow min_rtt/4 settling delay.
         * Use min_rtt instead of the smoothed RTT because reordering is
         * often a path property and less related to queuing or delayed ACKs.
         * Upon receiving DSACKs, linearly increase the window up to the
         * smoothed RTT.
         */
        return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
                   tp->srtt_us >> 3);
}
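
/* Worked example (illustrative values, not from this file): with
 * min_rtt = 40000 us and reo_wnd_steps = 2, the window is
 * (40000 >> 2) * 2 = 20000 us, i.e. two min_rtt/4 steps. The result is
 * then capped by the smoothed RTT; note srtt_us stores srtt << 3, so
 * srtt_us >> 3 recovers the smoothed RTT in microseconds.
 */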

s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
        return tp->rack.rtt_us + reo_wnd -
               tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
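
/* Worked example (hypothetical values): if rack.rtt_us = 100000 us,
 * reo_wnd = 25000 us, and the skb was sent 110000 us ago, the result is
 * 100000 + 25000 - 110000 = 15000 us: the skb must stay unacked for
 * another 15 ms before RACK considers it lost. A result <= 0 means the
 * deadline has already passed.
 */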

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb, *n;
        u32 reo_wnd;

        *reo_timeout = 0;
        reo_wnd = tcp_rack_reo_wnd(sk);
        list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
                                 tcp_tsorted_anchor) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                s32 remaining;

                /* Skip ones marked lost but not yet retransmitted */
                if ((scb->sacked & TCPCB_LOST) &&
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;

                if (!tcp_skb_sent_after(tp->rack.mstamp,
                                        tcp_skb_timestamp_us(skb),
                                        tp->rack.end_seq, scb->end_seq))
                        break;

                /* A packet is lost if it has not been s/acked beyond
                 * the recent RTT plus the reordering window.
                 */
                remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
                if (remaining <= 0) {
                        tcp_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
                        /* Record maximum wait time */
                        *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
}
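
/* Annotation (not part of the original source): tsorted_sent_queue is
 * kept in (re)transmit-time order, oldest first, so once
 * tcp_skb_sent_after() reports that an skb was not sent before the most
 * recently delivered packet (rack.mstamp/end_seq), no later entry can
 * qualify either and the walk stops early. *reo_timeout returns the
 * longest remaining wait among skbs still inside the reordering window.
 */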

bool tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return false;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
        if (timeout) {
                timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
        return !!timeout;
}
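
/* Annotation (not part of the original source): when some packets are
 * still within the reordering window, the REO timer is armed for the
 * longest remaining wait (padded by TCP_TIMEOUT_MIN_US, converted to
 * jiffies, and bounded above by the RTO); tcp_rack_reo_timeout() then
 * re-runs the detection when it fires. The return value tells the
 * caller whether such a timer was armed.
 */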

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      u64 xmit_time)
{
        u32 rtt_us;

        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
        if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or a prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., the retransmission is at
                 * least an RTT later).
                 */
                return;
        }
        tp->rack.advanced = 1;
        tp->rack.rtt_us = rtt_us;
        if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
                               end_seq, tp->rack.end_seq)) {
                tp->rack.mstamp = xmit_time;
                tp->rack.end_seq = end_seq;
        }
}
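
/* Worked example (hypothetical values): with min_rtt = 50000 us, if a
 * retransmitted packet is (s)acked only 10000 us after its retransmit
 * timestamp, the ACK most likely belongs to the original transmission,
 * so the 10 ms sample is discarded rather than recorded as rack.rtt_us.
 */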

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;
        u32 lost = tp->lost;

        prior_inflight = tcp_packets_in_flight(tp);
        tcp_rack_detect_loss(sk, &timeout);
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}
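
/* Annotation (not part of the original source): marking an skb lost
 * raises tp->lost_out and thus lowers tcp_packets_in_flight(), so the
 * inflight comparison is a cheap way to detect that
 * tcp_rack_detect_loss() marked something lost without threading an
 * extra result out of it.
 */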

/* Updates RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received that seems to have been caused by reordering
 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since the spurious retransmission may have been due to a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) caused by a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4) rather than as an
 * absolute value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
             TCP_RACK_STATIC_REO_WND) ||
            !rs->prior_delivered)
                return;

        /* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
        if (before(rs->prior_delivered, tp->rack.last_delivered))
                tp->rack.dsack_seen = 0;

        /* Adjust the reo_wnd if update is pending */
        if (tp->rack.dsack_seen) {
                tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
                                               tp->rack.reo_wnd_steps + 1);
                tp->rack.dsack_seen = 0;
                tp->rack.last_delivered = tp->delivered;
                tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
        } else if (!tp->rack.reo_wnd_persist) {
                tp->rack.reo_wnd_steps = 1;
        }
}
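
/* Worked example (illustrative): a fresh DSACK bumps reo_wnd_steps from
 * 1 to 2, widening the window from min_rtt/4 to min_rtt/2 (capped at
 * 0xFF steps), and re-arms reo_wnd_persist at 16; after 16 recoveries
 * with no further DSACK, the steps fall back to 1, i.e. the default
 * min_rtt/4 window.
 */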

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start fast recovery
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
        const u8 state = inet_csk(sk)->icsk_ca_state;
        struct tcp_sock *tp = tcp_sk(sk);

        if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
            (state == TCP_CA_Recovery && snd_una_advanced)) {
                struct sk_buff *skb = tcp_rtx_queue_head(sk);
                u32 mss;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        return;

                mss = tcp_skb_mss(skb);
                if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
                        tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
                                     mss, mss, GFP_ATOMIC);

                tcp_mark_skb_lost(sk, skb);
        }
}

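/* Annotation (not part of the original source): only the head of the
 * retransmit queue is marked lost per call, and a multi-segment skb is
 * first split so that exactly one MSS worth is retransmitted, matching
 * NewReno's pace of one retransmission per (partial) ACK.
 */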
