// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP Veno congestion control
 *
 * This is based on the congestion detection/avoidance scheme described in
 *    C. P. Fu, S. C. Liew.
 *    "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
 *    IEEE Journal on Selected Areas in Communication,
 *    Feb. 2003.
 *    See https://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

/* Default values of the Veno variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;

/* Veno variables */
struct veno {
	u8 doing_veno_now;	/* if true, do veno for this rtt */
	u16 cntrtt;		/* # of rtts measured within last rtt */
	u32 minrtt;		/* min of rtts measured within last rtt (in usec) */
	u32 basertt;		/* the min of all Veno rtt measurements seen (in usec) */
	u32 inc;		/* decide whether to increase cwnd */
	u32 diff;		/* calculate the diff rate */
};

/* There are several situations when we must "re-start" Veno:
 *
 *  o when a connection is established
 *  o after an RTO
 *  o after fast recovery
 *  o when we send a packet and there is no outstanding
 *    unacknowledged data (restarting an idle connection)
 *
 */
static inline void veno_enable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn on Veno */
	veno->doing_veno_now = 1;

	veno->minrtt = 0x7fffffff;
}

static inline void veno_disable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn off Veno */
	veno->doing_veno_now = 0;
}

static void tcp_veno_init(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	veno->basertt = 0x7fffffff;
	veno->inc = 1;
	veno_enable(sk);
}

/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk,
				const struct ack_sample *sample)
{
	struct veno *veno = inet_csk_ca(sk);
	u32 vrtt;

	if (sample->rtt_us < 0)
		return;

	/* Never allow zero rtt or baseRTT */
	vrtt = sample->rtt_us + 1;

	/* Filter to find propagation delay: */
	if (vrtt < veno->basertt)
		veno->basertt = vrtt;

	/* Find the min rtt during the last rtt to find
	 * the current prop. delay + queuing delay:
	 */
	veno->minrtt = min(veno->minrtt, vrtt);
	veno->cntrtt++;
}

static void tcp_veno_state(struct sock *sk, u8 ca_state)
{
	if (ca_state == TCP_CA_Open)
		veno_enable(sk);
	else
		veno_disable(sk);
}
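/* Worked example of the Veno "diff" estimate computed in
 * tcp_veno_cong_avoid() below.  The numbers are illustrative only and
 * are not taken from the paper or from real traces.
 *
 * With V_PARAM_SHIFT = 1, beta = 3 << 1 = 6, i.e. a threshold of 3
 * packets in the fixed-point scale.  Assume snd_cwnd = 20 packets,
 * basertt = 100 ms and minrtt = 120 ms over the last rtt:
 *
 *	target_cwnd = (20 * 100000 << 1) / 120000 = 33
 *	diff        = (20 << 1) - 33              = 7
 *
 * diff >= beta (7 >= 6, roughly 3.5 packets estimated to be queued in
 * the network), so Veno treats the path as congestive: cwnd grows only
 * every other rtt, and a loss halves cwnd in tcp_veno_ssthresh().
 * Had minrtt been 110 ms instead, target_cwnd would be 36 and
 * diff = 4 < beta, so Veno would stay in the non-congestive state:
 * Reno-like additive increase, and a loss would cut cwnd by only 1/5.
 */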
/*
 * If the connection is idle and we are restarting,
 * then we don't want to do any Veno calculations
 * until we get fresh rtt samples.  So when we
 * restart, we reset our Veno state to a clean
 * state.  After we get acks for this flight of
 * packets, _then_ we can make Veno calculations
 * again.
 */
static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
		tcp_veno_init(sk);
}

static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (!veno->doing_veno_now) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	/* limited by applications */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* We do the Veno calculations only if we got enough rtt samples */
	if (veno->cntrtt <= 2) {
		/* We don't have enough rtt samples to do the Veno
		 * calculation, so we'll behave like Reno.
		 */
		tcp_reno_cong_avoid(sk, ack, acked);
	} else {
		u64 target_cwnd;
		u32 rtt;

		/* We have enough rtt samples, so, using the Veno
		 * algorithm, we determine the state of the network.
		 */

		rtt = veno->minrtt;

		target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt;
		target_cwnd <<= V_PARAM_SHIFT;
		do_div(target_cwnd, rtt);

		veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd;

		if (tcp_in_slow_start(tp)) {
			/* Slow start. */
			acked = tcp_slow_start(tp, acked);
			if (!acked)
				goto done;
		}

		/* Congestion avoidance. */
		if (veno->diff < beta) {
			/* In the "non-congestive state", increase cwnd
			 * every rtt.
			 */
			tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
		} else {
			/* In the "congestive state", increase cwnd
			 * every other rtt.
			 */
			if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
				if (veno->inc &&
				    tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
					veno->inc = 0;
				} else
					veno->inc = 1;
				tp->snd_cwnd_cnt = 0;
			} else
				tp->snd_cwnd_cnt += acked;
		}
done:
		if (tcp_snd_cwnd(tp) < 2)
			tcp_snd_cwnd_set(tp, 2);
		else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
			tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
	}
	/* Wipe the slate clean for the next rtt. */
	/* veno->cntrtt = 0; */
	veno->minrtt = 0x7fffffff;
}

/* Veno MD phase */
static u32 tcp_veno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (veno->diff < beta)
		/* in "non-congestive state", cut cwnd by 1/5 */
		return max(tcp_snd_cwnd(tp) * 4 / 5, 2U);
	else
		/* in "congestive state", cut cwnd by 1/2 */
		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}

static struct tcp_congestion_ops tcp_veno __read_mostly = {
	.init		= tcp_veno_init,
	.ssthresh	= tcp_veno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_veno_cong_avoid,
	.pkts_acked	= tcp_veno_pkts_acked,
	.set_state	= tcp_veno_state,
	.cwnd_event	= tcp_veno_cwnd_event,

	.owner		= THIS_MODULE,
	.name		= "veno",
};

static int __init tcp_veno_register(void)
{
	BUILD_BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_veno);
	return 0;
}

static void __exit tcp_veno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_veno);
}

module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);

MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");
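/*
 * Example of selecting this congestion control from userspace: a
 * minimal sketch, not part of the module.  It assumes tcp_veno is
 * built in or already loaded, and that "veno" is either listed in
 * net.ipv4.tcp_allowed_congestion_control or the caller has
 * CAP_NET_ADMIN.  System-wide, the same choice can be made with
 * "sysctl -w net.ipv4.tcp_congestion_control=veno".
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	int main(void)
 *	{
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0) {
 *			perror("socket");
 *			return 1;
 *		}
 *		// Ask the kernel to use Veno on this socket only.
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *			       "veno", strlen("veno")) < 0) {
 *			perror("setsockopt(TCP_CONGESTION)");
 *			return 1;
 *		}
 *		puts("veno selected for this socket");
 *		return 0;
 *	}
 */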