// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
	Author: devik@cdi.cz
	========================================================================
	HTB is like TBF with multiple classes. It is also similar to CBQ because
	it allows assigning a priority to each class in the hierarchy.
	In fact it is another implementation of Floyd's formal sharing.

	Levels:
	Each class is assigned a level. A leaf ALWAYS has level 0 and root
	classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
	one less than their parent.
*/

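/* Illustrative example (hypothetical handles, not part of the original
 * comment): in the chain 1:1 -> 1:10 -> 1:101 the leaf 1:101 sits at
 * level 0, the interior class 1:10 at level 1 and the root class 1:1
 * at level 2. Packets are dequeued from leaves only; interior classes
 * merely lend tokens down this chain.
 */
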
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store the classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

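/* Illustrative example (hypothetical classid): if ptr pointed at class
 * 1:30 when that class dropped out of the feed, last_ptr_id holds
 * TC_H_MAKE(1 << 16, 30) == 0x0001001e, letting the next lookup resume
 * at (or just after) that classid via htb_id_find_next_upper().
 */
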
/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_basic_sync bstats_bias;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
			struct netdev_queue *offload_queue;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int		overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}

#define HTB_DIRECT ((struct htb_class *)-1L)

/**
 * htb_classify - classify a packet into class
 * @skb: the socket buffer
 * @sch: the active queue discipline
 * @qerr: pointer for returned status code
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use the MAJOR:default leaf. If still
 * unsuccessful, we finish and return the direct queue.
 */
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

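/* Walk-through (illustrative tc setup, not part of the kernel sources):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1: classid 1:20 htb rate 1mbit
 *
 * A packet with skb->priority == 1:0 is taken as HTB_DIRECT, one with
 * skb->priority == 1:20 hits that leaf immediately, and anything the
 * filters cannot place ends up in the default class 1:20.
 */
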
/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

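/* Example (illustrative): inserting classes with classids 1:3, 1:1 and
 * 1:2 into the same prio tree yields an rbtree whose in-order walk is
 * 1:1, 1:2, 1:3 - the order in which htb_lookup_leaf() visits them.
 */
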
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key nanoseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 * @q: the priority event queue
 * @cl: the class to remove
 * @mask: the given priorities in class in bitmap
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

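/* Note on the bit loops above: ffz() returns the first zero bit, so
 * ffz(~mask) is the lowest set bit of mask. E.g. mask == 0x5 visits
 * prio 0, then prio 2, then the loop terminates.
 */
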
/**
 * htb_activate_prios - creates active class's feed chain
 * @q: the priority event queue
 * @cl: the class to activate
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			unsigned int prio = ffz(~m);

			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
				break;
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 * @q: the priority event queue
 * @cl: the class to deactivate
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

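/* Worked example (hypothetical numbers): with htb_hysteresis enabled and
 * cl->cbuffer == 10000, a class that is not in HTB_CANT_SEND keeps its
 * mode until ctokens would fall below -10000 rather than below 0; with
 * hysteresis off both watermarks are 0 and every zero crossing of the
 * token counts can toggle the mode.
 */
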
/**
 * htb_class_mode - computes and returns current class mode
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but rather there can be a hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 * @q: the priority event queue
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

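/* Worked example (hypothetical numbers, hysteresis off): for a class
 * with cl->ctokens == -3000 and *diff == 1000, toks == -2000 is below
 * htb_lowater() == 0, so the mode is HTB_CANT_SEND and *diff becomes
 * 2000 - the nanoseconds of refill needed before the state may change,
 * which ends up in cl->pq_key via htb_add_to_wait_tree().
 */
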
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

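/* Token arithmetic example (hypothetical numbers): tokens are kept in
 * nanoseconds of transmission time. With diff == 100000 (100us elapsed
 * since the checkpoint) and psched_l2t_ns(&cl->rate, bytes) == 120000,
 * the bucket drains by a net 20000; results are clamped to cl->buffer
 * at the top and to about -cl->mbuffer at the bottom.
 */
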
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 * @q: the priority event queue
 * @cl: the class to start iterating from
 * @level: the minimum level to account
 * @skb: the socket buffer
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

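/* Propagation example (illustrative): a leaf (level 0) that borrowed
 * from its grandparent (level 2) is charged ceil tokens at every hop,
 * but rate tokens only at levels >= 2; the leaf and its parent count
 * xstats.borrows while the grandparent counts xstats.lends.
 */
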
/**
 * htb_do_events - make mode changes to classes at the level
 * @q: the priority event queue
 * @level: which wait_pq in 'q->hlevel'
 * @start: start jiffies
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events with cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

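/* Example (illustrative): searching for id 0x1001e in a tree holding
 * classids 0x1000a, 0x10014 and 0x10028 returns the node of 0x10028,
 * the smallest classid >= the requested id; NULL would mean the search
 * wrapped past the largest classid.
 */
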
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 * @hprio: the current one
 * @prio: which prio in class
 *
 * Find leaf where current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
				htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
				 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

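/* DRR example (hypothetical numbers): with cl->quantum == 1500, a leaf
 * dequeuing a 400-byte skb keeps the feed pointer; once the running
 * deficit[level] dips below zero it is topped up by the quantum and the
 * round-robin pointer advances to the next class at this prio/level.
 */
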
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

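/* Scheduling note (illustrative): next_event starts a 5s horizon away
 * and any sooner class event pulls it in; the qdisc watchdog is then
 * armed for that instant. If events are already overdue (next_event <=
 * q->now), the work item reschedules the qdisc instead of spinning.
 */
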
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	unsigned int ntx;
	bool offload;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	unsigned int ntx;
	bool offload;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);

	if (offload) {
		if (sch->parent != TC_H_ROOT) {
			NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
			return -EOPNOTSUPP;
		}

		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
			NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
			return -EOPNOTSUPP;
		}

		q->num_direct_qdiscs = dev->real_num_tx_queues;
		q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
					   sizeof(*q->direct_qdiscs),
					   GFP_KERNEL);
		if (!q->direct_qdiscs)
			return -ENOMEM;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	if (!offload)
		return 0;

	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *qdisc;

		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  TC_H_MAKE(sch->handle, 0), extack);
		if (!qdisc) {
			return -ENOMEM;
		}

		q->direct_qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	offload_opt = (struct tc_htb_qopt_offload) {
		.command = TC_HTB_CREATE,
		.parent_classid = TC_H_MAJ(sch->handle) >> 16,
		.classid = TC_H_MIN(q->defcls),
		.extack = extack,
	};
	err = htb_offload(dev, &offload_opt);
	if (err)
		return err;

	/* Defer this assignment, so that htb_destroy skips offload-related
	 * parts (especially calling ndo_setup_tc) on errors.
	 */
	q->offload = true;

	return 0;
}

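/* Attach path: in offload mode the per-queue pfifos allocated in htb_init()
 * are grafted onto their TX queues and the tracking array is released; in
 * software mode the HTB qdisc itself is grafted onto every TX queue,
 * mirroring what qdisc_graft() does for an ordinary root qdisc.
 */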
static void htb_attach_offload(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct htb_sched *q = qdisc_priv(sch);
	unsigned int ntx;

	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
		struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];

		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		qdisc_put(old);
		qdisc_hash_add(qdisc, false);
	}
	for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);

		qdisc_put(old);
	}

	kfree(q->direct_qdiscs);
	q->direct_qdiscs = NULL;
}

static void htb_attach_software(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	/* Resemble qdisc_graft behavior. */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);

		qdisc_refcount_inc(sch);

		qdisc_put(old);
	}
}

static void htb_attach(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (q->offload)
		htb_attach_offload(sch);
	else
		htb_attach_software(sch);
}

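/* TCQ_F_OFFLOADED is recomputed from q->offload at dump time; this is the
 * flag user space keys on to report a qdisc as offloaded.
 */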
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	if (q->offload)
		sch->flags |= TCQ_F_OFFLOADED;
	else
		sch->flags &= ~TCQ_F_OFFLOADED;

	sch->qstats.overlimits = q->overlimits;
	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;
	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

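/* Per-class dump: the legacy tc_htb_opt rate fields are 32 bits wide, so
 * rates of 2^32 bytes/s or more are additionally carried in the 64-bit
 * TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes below.
 */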
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->leaf.q)
		tcm->tcm_info = cl->leaf.q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

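/* In offload mode interior classes never see packets in software, so their
 * counters are reconstructed here by summing the bstats_bias of every
 * descendant class plus the leaf qdisc counters of descendant leaves.
 */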
static void htb_offload_aggregate_stats(struct htb_sched *q,
					struct htb_class *cl)
{
	u64 bytes = 0, packets = 0;
	struct htb_class *c;
	unsigned int i;

	gnet_stats_basic_sync_init(&cl->bstats);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
			struct htb_class *p = c;

			while (p && p->level < cl->level)
				p = p->parent;

			if (p != cl)
				continue;

			bytes += u64_stats_read(&c->bstats_bias.bytes);
			packets += u64_stats_read(&c->bstats_bias.packets);
			if (c->level == 0) {
				bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
				packets += u64_stats_read(&c->leaf.q->bstats.packets);
			}
		}
	}
	_bstats_update(&cl->bstats, bytes, packets);
}

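/* tc_htb_xstats carries 32-bit token fields, hence the clamp to
 * INT_MIN..INT_MAX below; token values are reported in PSCHED ticks.
 */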
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
		.overlimits = cl->overlimits,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->leaf.q)
		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);

	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (q->offload) {
		if (!cl->level) {
			if (cl->leaf.q)
				cl->bstats = cl->leaf.q->bstats;
			else
				gnet_stats_basic_sync_init(&cl->bstats);
			_bstats_update(&cl->bstats,
				       u64_stats_read(&cl->bstats_bias.bytes),
				       u64_stats_read(&cl->bstats_bias.packets));
		} else {
			htb_offload_aggregate_stats(q, cl);
		}
	}

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static struct netdev_queue *
htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	int err;

	if (!q->offload)
		return sch->dev_queue;

	offload_opt = (struct tc_htb_qopt_offload) {
		.command = TC_HTB_LEAF_QUERY_QUEUE,
		.classid = TC_H_MIN(tcm->tcm_parent),
	};
	err = htb_offload(dev, &offload_opt);
	if (err || offload_opt.qid >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, offload_opt.qid);
}

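/* Grafting must not race with the datapath, so a running device is
 * deactivated around dev_graft_qdisc() and reactivated afterwards.
 */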
static struct Qdisc *
htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
{
	struct net_device *dev = dev_queue->dev;
	struct Qdisc *old_q;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);
	old_q = dev_graft_qdisc(dev_queue, new_q);
	if (new_q)
		new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return old_q;
}

static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
{
	struct netdev_queue *queue;

	queue = cl->leaf.offload_queue;
	if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
		WARN_ON(cl->leaf.q->dev_queue != queue);

	return queue;
}

static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
				   struct htb_class *cl_new, bool destroying)
{
	struct netdev_queue *queue_old, *queue_new;
	struct net_device *dev = qdisc_dev(sch);

	queue_old = htb_offload_get_queue(cl_old);
	queue_new = htb_offload_get_queue(cl_new);

	if (!destroying) {
		struct Qdisc *qdisc;

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);
		qdisc = dev_graft_qdisc(queue_old, NULL);
		WARN_ON(qdisc != cl_old->leaf.q);
	}

	if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
		cl_old->leaf.q->dev_queue = queue_new;
	cl_old->leaf.offload_queue = queue_new;

	if (!destroying) {
		struct Qdisc *qdisc;

		qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
		if (dev->flags & IFF_UP)
			dev_activate(dev);
		WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
	}
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = sch->dev_queue;
	struct htb_class *cl = (struct htb_class *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc *old_q;

	if (cl->level)
		return -EINVAL;

	if (q->offload)
		dev_queue = htb_offload_get_queue(cl);

	if (!new) {
		new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, extack);
		if (!new)
			return -ENOBUFS;
	}

	if (q->offload) {
		/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
		qdisc_refcount_inc(new);
		old_q = htb_graft_helper(dev_queue, new);
	}

	*old = qdisc_replace(sch, new, &cl->leaf.q);

	if (q->offload) {
		WARN_ON(old_q != *old);
		qdisc_put(old_q);
	}

	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	htb_deactivate(qdisc_priv(sch), cl);
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

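/* When its last child is deleted, an inner class reverts to a leaf: it
 * leaves the wait queue, restarts with a full token bucket in HTB_CAN_SEND
 * mode and, in offload mode, inherits the child's TX queue.
 */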
static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->inner, 0, sizeof(parent->inner));
	parent->leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
	if (q->offload)
		parent->leaf.offload_queue = cl->leaf.offload_queue;
}

static void htb_parent_to_leaf_offload(struct Qdisc *sch,
				       struct netdev_queue *dev_queue,
				       struct Qdisc *new_q)
{
	struct Qdisc *old_q;

	/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
	if (new_q)
		qdisc_refcount_inc(new_q);
	old_q = htb_graft_helper(dev_queue, new_q);
	WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
}

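/* Three driver commands cover leaf deletion: TC_HTB_LEAF_DEL for an ordinary
 * leaf, TC_HTB_LEAF_DEL_LAST when the parent reverts to a leaf, and
 * TC_HTB_LEAF_DEL_LAST_FORCE on the destroy path, where a driver failure
 * can no longer be rolled back.
 */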
static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
				     bool last_child, bool destroying,
				     struct netlink_ext_ack *extack)
{
	struct tc_htb_qopt_offload offload_opt;
	struct netdev_queue *dev_queue;
	struct Qdisc *q = cl->leaf.q;
	struct Qdisc *old;
	int err;

	if (cl->level)
		return -EINVAL;

	WARN_ON(!q);
	dev_queue = htb_offload_get_queue(cl);
	/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
	 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
	 * does not need to graft or qdisc_put the qdisc being destroyed.
	 */
	if (!destroying) {
		old = htb_graft_helper(dev_queue, NULL);
		/* Last qdisc grafted should be the same as cl->leaf.q when
		 * calling htb_delete.
		 */
		WARN_ON(old != q);
	}

	if (cl->parent) {
		_bstats_update(&cl->parent->bstats_bias,
			       u64_stats_read(&q->bstats.bytes),
			       u64_stats_read(&q->bstats.packets));
	}

	offload_opt = (struct tc_htb_qopt_offload) {
		.command = !last_child ? TC_HTB_LEAF_DEL :
			   destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
			   TC_HTB_LEAF_DEL_LAST,
		.classid = cl->common.classid,
		.extack = extack,
	};
	err = htb_offload(qdisc_dev(sch), &offload_opt);

	if (!destroying) {
		if (!err)
			qdisc_put(old);
		else
			htb_graft_helper(dev_queue, old);
	}

	if (last_child)
		return err;

	if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
		u32 classid = TC_H_MAJ(sch->handle) |
			      TC_H_MIN(offload_opt.classid);
		struct htb_class *moved_cl = htb_find(classid, sch);

		htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
	}

	return err;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->leaf.q);
		qdisc_put(cl->leaf.q);
	}
	gen_kill_estimator(&cl->rate_est);
	tcf_block_put(cl->block);
	kfree(cl);
}

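/* In offload mode htb_destroy() tears the tree down leaves-first: each pass
 * of the loop below deletes all current leaves, which may turn their parents
 * into leaves, so it repeats until a pass changes nothing.
 */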
static void htb_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	bool nonempty, changed;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to call
	 * unbind_filter on it (without an Oops).
	 */
	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}

	do {
		nonempty = false;
		changed = false;
		for (i = 0; i < q->clhash.hashsize; i++) {
			hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
						  common.hnode) {
				bool last_child;

				if (!q->offload) {
					htb_destroy_class(sch, cl);
					continue;
				}

				nonempty = true;

				if (cl->level)
					continue;

				changed = true;

				last_child = htb_parent_last_child(cl);
				htb_destroy_class_offload(sch, cl, last_child,
							  true, NULL);
				qdisc_class_hash_remove(&q->clhash,
							&cl->common);
				if (cl->parent)
					cl->parent->children--;
				if (last_child)
					htb_parent_to_leaf(sch, cl, NULL);
				htb_destroy_class(sch, cl);
			}
		}
	} while (changed);
	WARN_ON(nonempty);

	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);

	if (q->offload) {
		offload_opt = (struct tc_htb_qopt_offload) {
			.command = TC_HTB_DESTROY,
		};
		htb_offload(dev, &offload_opt);
	}

	if (!q->direct_qdiscs)
		return;
	for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
		qdisc_put(q->direct_qdiscs[i]);
	kfree(q->direct_qdiscs);
}

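/* A class can only be deleted when it has no children and no filter
 * references. The offload command is issued before the software tree is
 * touched, so a driver failure leaves the qdisc state unchanged.
 */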
static int htb_delete(struct Qdisc *sch, unsigned long arg,
		      struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;
	int err;

	/* TODO: why don't we allow deleting a subtree? references? does the
	 * tc subsys guarantee us that in htb_destroy it holds no class
	 * refs so that we can remove children safely there?
	 */
	if (cl->children || qdisc_class_in_use(&cl->common)) {
		NL_SET_ERR_MSG(extack, "HTB class in use");
		return -EBUSY;
	}

	if (!cl->level && htb_parent_last_child(cl))
		last_child = 1;

	if (q->offload) {
		err = htb_destroy_class_offload(sch, cl, last_child, false,
						extack);
		if (err)
			return err;
	}

	if (last_child) {
		struct netdev_queue *dev_queue = sch->dev_queue;

		if (q->offload)
			dev_queue = htb_offload_get_queue(cl);

		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid,
					  NULL);
		if (q->offload)
			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
	}

	sch_tree_lock(sch);

	if (!cl->level)
		qdisc_purge_queue(cl->leaf.q);

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(sch, cl, new_q);

	sch_tree_unlock(sch);

	htb_destroy_class(sch, cl);
	return 0;
}

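/* htb_change_class() handles both creation and modification. The leaf qdisc
 * is created before sch_tree_lock() because qdisc_create_dflt() may sleep;
 * in offload mode, adding the first child under a leaf issues
 * TC_HTB_LEAF_TO_INNER to move that traffic class into the driver.
 */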
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct tc_htb_qopt_offload offload_opt;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct Qdisc *parent_qdisc = NULL;
	struct netdev_queue *dev_queue;
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;
	int warn = 0;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  extack);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	if (q->offload) {
		/* Options not supported by the offload. */
		if (hopt->rate.overhead || hopt->ceil.overhead) {
			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
			goto failure;
		}
		if (hopt->rate.mpu || hopt->ceil.mpu) {
			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
			goto failure;
		}
	}

	/* Keeping backward compatible with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
					      NULL));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
					      NULL));

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	if (!cl) {		/* new class */
		struct net_device *dev = qdisc_dev(sch);
		struct Qdisc *new_q, *old_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		gnet_stats_basic_sync_init(&cl->bstats);
		gnet_stats_basic_sync_init(&cl->bstats_bias);

		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						true,
						tca[TCA_RATE] ? : &est.nla);
			if (err)
				goto err_block_put;
		}

		cl->children = 0;
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		cl->common.classid = classid;

		/* Make sure nothing interrupts us in between of two
		 * ndo_setup_tc calls.
		 */
		ASSERT_RTNL();

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		 * so it can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		if (!q->offload) {
			dev_queue = sch->dev_queue;
		} else if (!(parent && !parent->level)) {
			/* Assign a dev_queue to this classid. */
			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_LEAF_ALLOC_QUEUE,
				.classid = cl->common.classid,
				.parent_classid = parent ?
					TC_H_MIN(parent->common.classid) :
					TC_HTB_CLASSID_ROOT,
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.prio = hopt->prio,
				.quantum = hopt->quantum,
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err) {
				NL_SET_ERR_MSG_WEAK(extack,
						    "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
				goto err_kill_estimator;
			}
			dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
		} else { /* First child. */
			dev_queue = htb_offload_get_queue(parent);
			old_q = htb_graft_helper(dev_queue, NULL);
			WARN_ON(old_q != parent->leaf.q);
			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_LEAF_TO_INNER,
				.classid = cl->common.classid,
				.parent_classid =
					TC_H_MIN(parent->common.classid),
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.prio = hopt->prio,
				.quantum = hopt->quantum,
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err) {
				NL_SET_ERR_MSG_WEAK(extack,
						    "Failed to offload TC_HTB_LEAF_TO_INNER");
				htb_graft_helper(dev_queue, old_q);
				goto err_kill_estimator;
			}
			_bstats_update(&parent->bstats_bias,
				       u64_stats_read(&old_q->bstats.bytes),
				       u64_stats_read(&old_q->bstats.packets));
			qdisc_put(old_q);
		}
		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  classid, NULL);
		if (q->offload) {
			/* One ref for cl->leaf.q, the other for
			 * dev_queue->qdisc.
			 */
			if (new_q)
				qdisc_refcount_inc(new_q);
			old_q = htb_graft_helper(dev_queue, new_q);
			/* No qdisc_put needed. */
			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
		}
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			qdisc_purge_queue(parent->leaf.q);
			parent_qdisc = parent->leaf.q;
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->inner, 0, sizeof(parent->inner));
		}

		/* leaf (we) needs elementary qdisc */
		cl->leaf.q = new_q ? new_q : &noop_qdisc;
		if (q->offload)
			cl->leaf.offload_queue = dev_queue;

		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		if (q->offload) {
			struct net_device *dev = qdisc_dev(sch);

			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_NODE_MODIFY,
				.classid = cl->common.classid,
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.prio = hopt->prio,
				.quantum = hopt->quantum,
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err)
				/* Estimator was replaced, and rollback may fail
				 * as well, so we don't try to recover it, and
				 * the estimator won't work properly with the
				 * offload anyway, because bstats are updated
				 * only when the stats are queried.
				 */
				return err;
		}

		sch_tree_lock(sch);
	}

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* it used to be a nasty bug here, we have to check that node
	 * is really a leaf before changing cl->leaf !
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			warn = -1;
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			warn = 1;
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);
	qdisc_put(parent_qdisc);

	if (warn)
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "quantum of class %X is %s. Consider r2q change.",
				       cl->common.classid, (warn == -1 ? "small" : "big"));

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

err_kill_estimator:
	gen_kill_estimator(&cl->rate_est);
err_block_put:
	tcf_block_put(cl->block);
	kfree(cl);
failure:
	return err;
}

static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get the
	 * class for other reasons, so we have to allow for it.
	 * ----
	 * 19.6.2002 As Werner explained it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by class during destroy IIUC.
	 */
	if (cl)
		qdisc_class_get(&cl->common);
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	qdisc_class_put(&cl->common);
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.select_queue	=	htb_select_queue,
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.find		=	htb_search,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_block	=	htb_tcf_block,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

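/* In offload mode the datapath runs through the per-queue qdiscs grafted in
 * htb_attach_offload(), so htb_enqueue() and htb_dequeue() below are
 * exercised only in software mode.
 */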
static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	htb_init,
	.attach		=	htb_attach,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("htb");

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");