// SPDX-License-Identifier: GPL-2.0-only
/*
 *	TCP HYBLA
 *
 *	TCP-HYBLA Congestion control algorithm, based on:
 *	  C. Caini, R. Firrincieli, "TCP-Hybla: A TCP Enhancement
 *	  for Heterogeneous Networks",
 *	  International Journal of Satellite Communications,
 *	  September 2004
 *	Daniele Lacamera
 *	root at danielinux.net
 */

#include <linux/module.h>
#include <net/tcp.h>

/* Tcp Hybla structure. */
struct hybla {
	bool  hybla_en;
	u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
	u32   rho;	      /* Rho parameter, integer part */
	u32   rho2;	      /* Rho * Rho, integer part */
	u32   rho_3ls;	      /* Rho parameter, <<3 */
	u32   rho2_7ls;	      /* Rho^2, <<7 */
	u32   minrtt_us;      /* Minimum smoothed round trip time value seen */
};

/* Hybla reference round trip time (default = 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");

/* This is called to refresh values for hybla parameters */
static inline void hybla_recalc_param(struct sock *sk)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho_3ls = max_t(u32,
			    tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
			    8U);
	ca->rho = ca->rho_3ls >> 3;
	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
	ca->rho2 = ca->rho2_7ls >> 7;
}
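/*
 * Worked example of the fixed-point arithmetic above (illustrative
 * numbers only). tp->srtt_us stores the smoothed RTT << 3, in usec,
 * so with srtt = 300 ms and the default rtt0 = 25 ms:
 *
 *	srtt_us  = 300000 << 3           = 2400000
 *	rho_3ls  = 2400000 / (25 * 1000) = 96     (rho << 3)
 *	rho      = 96 >> 3               = 12     (srtt / rtt0)
 *	rho2_7ls = (96 * 96) << 1        = 18432  (rho^2 << 7)
 *	rho2     = 18432 >> 7            = 144    (12^2)
 *
 * The 8U floor keeps rho >= 1, so when srtt <= rtt0 the increments
 * computed below degenerate to plain Reno behaviour.
 */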
static void hybla_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho = 0;
	ca->rho2 = 0;
	ca->rho_3ls = 0;
	ca->rho2_7ls = 0;
	ca->snd_cwnd_cents = 0;
	ca->hybla_en = true;
	tcp_snd_cwnd_set(tp, 2);
	tp->snd_cwnd_clamp = 65535;

	/* 1st Rho measurement based on initial srtt */
	hybla_recalc_param(sk);

	/* set minimum rtt as this is the 1st ever seen */
	ca->minrtt_us = tp->srtt_us;
	tcp_snd_cwnd_set(tp, ca->rho);
}

static void hybla_state(struct sock *sk, u8 ca_state)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->hybla_en = (ca_state == TCP_CA_Open);
}

static inline u32 hybla_fraction(u32 odds)
{
	static const u32 fractions[] = {
		128, 139, 152, 165, 181, 197, 215, 234,
	};

	return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128;
}
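/*
 * Note on the table above (illustrative): fractions[i] is roughly
 * 128 * 2^(i/8), i.e. 2^(i/8) in <<7 fixed point, so
 * hybla_fraction(rho_fractions) returns 2^fract scaled by 128.
 * E.g. rho_3ls = 100 gives rho = 12, rho_fractions = 100 - 96 = 4 and
 * hybla_fraction(4) = 181 ~= 128 * sqrt(2). The slow-start increment
 * computed in hybla_cong_avoid() then becomes
 *	(1 << rho) * hybla_fraction(fract) - 128 ~= (2^(rho + fract/8) - 1) << 7
 * which is the Hybla slow-start rule INC = 2^RHO - 1, kept in <<7 units.
 */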
/* TCP Hybla main routine.
 * This is the algorithm behavior:
 *     o Recalc Hybla parameters if min_rtt has changed
 *     o Give cwnd a new value based on the model proposed
 *     o remember increments <1
 */
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);
	u32 increment, odd, rho_fractions;
	int is_slowstart = 0;

	/* Recalculate rho only if this srtt is the lowest */
	if (tp->srtt_us < ca->minrtt_us) {
		hybla_recalc_param(sk);
		ca->minrtt_us = tp->srtt_us;
	}

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (!ca->hybla_en) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	if (ca->rho == 0)
		hybla_recalc_param(sk);

	rho_fractions = ca->rho_3ls - (ca->rho << 3);

	if (tcp_in_slow_start(tp)) {
		/*
		 * slow start
		 *	INC = 2^RHO - 1
		 * This is done by splitting the rho parameter
		 * into 2 parts: an integer part and a fraction part.
		 * Increment<<7 is estimated by doing:
		 *	       [2^(int+fract)]<<7
		 * that is equal to:
		 *	       (2^int)	*  [(2^fract) <<7]
		 * 2^int is straightly computed as 1<<int,
		 * while we will use hybla_fraction() to
		 * calculate 2^fract in a <<7 value.
		 */
		is_slowstart = 1;
		increment = ((1 << min(ca->rho, 16U)) *
			hybla_fraction(rho_fractions)) - 128;
	} else {
		/*
		 * congestion avoidance
		 *	INC = RHO^2 / W
		 * as long as increment is estimated as (rho^2<<7)/window
		 * it already is <<7 and we can easily count its fractions.
		 */
		increment = ca->rho2_7ls / tcp_snd_cwnd(tp);
		if (increment < 128)
			tp->snd_cwnd_cnt++;
	}

	odd = increment % 128;
	tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7));
	ca->snd_cwnd_cents += odd;

	/* check when fractions go >= 128 and increase cwnd by 1. */
	while (ca->snd_cwnd_cents >= 128) {
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
		ca->snd_cwnd_cents -= 128;
		tp->snd_cwnd_cnt = 0;
	}
	/* check when cwnd has not been incremented for a while */
	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
		tp->snd_cwnd_cnt = 0;
	}
	/* clamp down slowstart cwnd to ssthresh value. */
	if (is_slowstart)
		tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));

	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}

static struct tcp_congestion_ops tcp_hybla __read_mostly = {
	.init		= hybla_init,
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= hybla_cong_avoid,
	.set_state	= hybla_state,

	.owner		= THIS_MODULE,
	.name		= "hybla"
};

static int __init hybla_register(void)
{
	BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_hybla);
}

static void __exit hybla_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_hybla);
}

module_init(hybla_register);
module_exit(hybla_unregister);

MODULE_AUTHOR("Daniele Lacamera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Hybla");
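/*
 * Usage sketch (illustrative, userspace side, not part of this module):
 * after "modprobe tcp_hybla", Hybla can be made the system default with
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=hybla
 *
 * or requested per socket through the TCP_CONGESTION socket option:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "hybla",
 *		   strlen("hybla"));
 *
 * Unprivileged callers may only select algorithms listed in
 * net.ipv4.tcp_allowed_congestion_control.
 */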