// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
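
/* Example (editor's illustration, not part of the original source; the
 * device name and rates are assumptions): a typical hierarchy built from
 * user space with tc(8):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit ceil 100mbit
 *
 * 1:10 and 1:20 are leaves (level 0) under the interior class 1:1; each
 * may borrow bandwidth left unused by its sibling, up to its ceil, and
 * unclassified traffic defaults to 1:20.
 */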

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param    (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

/* used internally to keep status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_basic_sync bstats_bias;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */
	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
			struct netdev_queue *offload_queue;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int	overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};
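
/* A sketch of how the structures above fit together (editor's note,
 * following the semantics documented in this file): for every (level,
 * prio) pair, q->hlevel[level].hprio[prio].row is a "self feed" rb-tree
 * of classes in HTB_CAN_SEND mode, keyed by classid.  Every interior
 * class additionally owns cl->inner.clprio[prio].feed, an rb-tree of its
 * HTB_MAY_BORROW children.  The dequeue path walks a row, descends
 * through feed trees to a leaf, and remembers its position in ->ptr /
 * ->last_ptr_id so the round robin can resume where it stopped.
 */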

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}

#define HTB_DIRECT ((struct htb_class *)-1L)

/**
 * htb_classify - classify a packet into class
 * @skb: the socket buffer
 * @sch: the active queue discipline
 * @qerr: pointer for returned status code
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * we finish and return the direct queue.
 */
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
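
/* Illustrative example (editor's addition; the classids are assumptions):
 * with a qdisc handle of 1:, an application can bypass all filters by
 * setting skb->priority to a classid, e.g. via setsockopt(SO_PRIORITY):
 *
 *	priority 0x10000 (1:0)  -> HTB_DIRECT, skb goes to the direct queue
 *	priority 0x10020 (1:20) -> leaf 1:20, if it exists and is a leaf
 *
 * Anything else falls through to the attached filters and finally to the
 * MAJOR:default class.
 */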

/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key (nanoseconds). Make sure that class
 * is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 * @q: the priority event queue
 * @cl: the class to remove
 * @mask: the given priorities in class in bitmap
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
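
/* Editor's note on the mask walks above and below: prio_activity is a
 * bitmap of active priorities, and ffz(~mask) yields the index of the
 * lowest set bit.  E.g. for mask == 0x5 (prios 0 and 2 active) the loop
 * visits prio 0, clears bit 0 leaving 0x4, then visits prio 2 and stops:
 * one rb-tree operation per active priority, and nothing for mask == 0.
 */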

/**
 * htb_activate_prios - creates active class's feed chain
 * @q: the priority event queue
 * @cl: the class to activate
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			unsigned int prio = ffz(~m);

			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
				break;
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 * @q: the priority event queue
 * @cl: the class to deactivate
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);
			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth to note that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
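
/* Worked example (editor's sketch of the arithmetic above): with
 * htb_hysteresis enabled and cl currently in HTB_CAN_SEND, the class is
 * demoted only once cl->tokens + diff falls below -cl->buffer
 * (htb_hiwater), not at 0; and once in HTB_CANT_SEND it is readmitted
 * only after cl->ctokens + diff climbs back to 0 rather than to
 * -cl->cbuffer (htb_lowater).  A class oscillating around empty buckets
 * therefore flips modes far less often, at the cost of up to one extra
 * buffer's worth of shaping inaccuracy.
 */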

/**
 * htb_change_class_mode - changes class's mode
 * @q: the priority event queue
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
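
/* Worked numbers for the two helpers above (editor's sketch; the rate is
 * an assumption): psched_l2t_ns() converts a packet length into the time
 * the configured rate needs to send it.  At 1 Mbit/s (125000 bytes/s) a
 * 1500 byte packet costs
 *	1500 / 125000 s = 12 ms = 12000000 ns,
 * which is subtracted from the bucket after it was topped up with the
 * elapsed time 'diff' and clamped to cl->buffer (resp. cl->cbuffer).
 * Clamping at 1 - cl->mbuffer bounds how far a class may go into debt.
 */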

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 * @q: the priority event queue
 * @cl: the class to start iterating from
 * @level: the minimum level to account
 * @skb: the socket buffer
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node,
						  &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 * @q: the priority event queue
 * @level: which wait_pq in 'q->hlevel'
 * @start: start jiffies
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
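
/* Example of the helper above (editor's illustration; classids are
 * assumptions): in an id-tree holding leaves 1:10, 1:20 and 1:30,
 * htb_id_find_next_upper(prio, root, 0x10015) returns the node of 1:20,
 * the smallest classid >= the hint, while a hint above 0x10030 returns
 * NULL and the caller rewinds to the leftmost class in the tree.
 */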

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 * @hprio: the row/feed slot to search
 * @prio: which prio in class
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
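
/* Editor's note on the deficit handling above (the quanta are assumed
 * numbers): within one (level, prio) row the leaves share bandwidth in
 * proportion to cl->quantum, which is derived elsewhere in this file
 * from the class rate divided by the qdisc's rate2quantum (r2q) value.
 * Two backlogged leaves with quanta 3000 and 1000 therefore get roughly
 * a 3:1 byte share: each keeps dequeuing until its deficit goes
 * negative, is then recharged by its quantum, and the DRR pointer
 * advances to the next class in the row.
 */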

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	unsigned int ntx;
	bool offload;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);

	if (offload) {
		if (sch->parent != TC_H_ROOT) {
			NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
			return -EOPNOTSUPP;
		}

		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
<< 1090 NL_SET_ERR_MSG(extack << 1091 return -EOPNOTSUPP; << 1092 } << 1093 << 1094 q->num_direct_qdiscs = dev->r << 1095 q->direct_qdiscs = kcalloc(q- << 1096 si << 1097 GF << 1098 if (!q->direct_qdiscs) << 1099 return -ENOMEM; << 1100 } << 1101 << 1102 err = qdisc_class_hash_init(&q->clhas 1017 err = qdisc_class_hash_init(&q->clhash); 1103 if (err < 0) 1018 if (err < 0) 1104 return err; 1019 return err; >> 1020 for (i = 0; i < TC_HTB_NUMPRIO; i++) >> 1021 INIT_LIST_HEAD(q->drops + i); >> 1022 >> 1023 qdisc_watchdog_init(&q->watchdog, sch); >> 1024 INIT_WORK(&q->work, htb_work_func); >> 1025 skb_queue_head_init(&q->direct_queue); 1105 1026 1106 if (tb[TCA_HTB_DIRECT_QLEN]) 1027 if (tb[TCA_HTB_DIRECT_QLEN]) 1107 q->direct_qlen = nla_get_u32( 1028 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); 1108 else !! 1029 else { 1109 q->direct_qlen = qdisc_dev(sc 1030 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; 1110 !! 1031 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ >> 1032 q->direct_qlen = 2; >> 1033 } 1111 if ((q->rate2quantum = gopt->rate2qua 1034 if ((q->rate2quantum = gopt->rate2quantum) < 1) 1112 q->rate2quantum = 1; 1035 q->rate2quantum = 1; 1113 q->defcls = gopt->defcls; 1036 q->defcls = gopt->defcls; 1114 1037 1115 if (!offload) << 1116 return 0; << 1117 << 1118 for (ntx = 0; ntx < q->num_direct_qdi << 1119 struct netdev_queue *dev_queu << 1120 struct Qdisc *qdisc; << 1121 << 1122 qdisc = qdisc_create_dflt(dev << 1123 TC_ << 1124 if (!qdisc) { << 1125 return -ENOMEM; << 1126 } << 1127 << 1128 q->direct_qdiscs[ntx] = qdisc << 1129 qdisc->flags |= TCQ_F_ONETXQU << 1130 } << 1131 << 1132 sch->flags |= TCQ_F_MQROOT; << 1133 << 1134 offload_opt = (struct tc_htb_qopt_off << 1135 .command = TC_HTB_CREATE, << 1136 .parent_classid = TC_H_MAJ(sc << 1137 .classid = TC_H_MIN(q->defcls << 1138 .extack = extack, << 1139 }; << 1140 err = htb_offload(dev, &offload_opt); << 1141 if (err) << 1142 return err; << 1143 << 1144 /* Defer this assignment, so that htb << 1145 * parts (especially calling ndo_setu << 1146 */ << 1147 q->offload = true; << 1148 << 1149 return 0; 1038 return 0; 1150 } 1039 } 1151 1040 1152 static void htb_attach_offload(struct Qdisc * << 1153 { << 1154 struct net_device *dev = qdisc_dev(sc << 1155 struct htb_sched *q = qdisc_priv(sch) << 1156 unsigned int ntx; << 1157 << 1158 for (ntx = 0; ntx < q->num_direct_qdi << 1159 struct Qdisc *old, *qdisc = q << 1160 << 1161 old = dev_graft_qdisc(qdisc-> << 1162 qdisc_put(old); << 1163 qdisc_hash_add(qdisc, false); << 1164 } << 1165 for (ntx = q->num_direct_qdiscs; ntx << 1166 struct netdev_queue *dev_queu << 1167 struct Qdisc *old = dev_graft << 1168 << 1169 qdisc_put(old); << 1170 } << 1171 << 1172 kfree(q->direct_qdiscs); << 1173 q->direct_qdiscs = NULL; << 1174 } << 1175 << 1176 static void htb_attach_software(struct Qdisc << 1177 { << 1178 struct net_device *dev = qdisc_dev(sc << 1179 unsigned int ntx; << 1180 << 1181 /* Resemble qdisc_graft behavior. 
*/ << 1182 for (ntx = 0; ntx < dev->num_tx_queue << 1183 struct netdev_queue *dev_queu << 1184 struct Qdisc *old = dev_graft << 1185 << 1186 qdisc_refcount_inc(sch); << 1187 << 1188 qdisc_put(old); << 1189 } << 1190 } << 1191 << 1192 static void htb_attach(struct Qdisc *sch) << 1193 { << 1194 struct htb_sched *q = qdisc_priv(sch) << 1195 << 1196 if (q->offload) << 1197 htb_attach_offload(sch); << 1198 else << 1199 htb_attach_software(sch); << 1200 } << 1201 << 1202 static int htb_dump(struct Qdisc *sch, struct 1041 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1203 { 1042 { >> 1043 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch); 1204 struct htb_sched *q = qdisc_priv(sch) 1044 struct htb_sched *q = qdisc_priv(sch); 1205 struct nlattr *nest; 1045 struct nlattr *nest; 1206 struct tc_htb_glob gopt; 1046 struct tc_htb_glob gopt; 1207 1047 1208 if (q->offload) !! 1048 spin_lock_bh(root_lock); 1209 sch->flags |= TCQ_F_OFFLOADED << 1210 else << 1211 sch->flags &= ~TCQ_F_OFFLOADE << 1212 << 1213 sch->qstats.overlimits = q->overlimit << 1214 /* Its safe to not acquire qdisc lock << 1215 * no change can happen on the qdisc << 1216 */ << 1217 1049 1218 gopt.direct_pkts = q->direct_pkts; 1050 gopt.direct_pkts = q->direct_pkts; 1219 gopt.version = HTB_VER; 1051 gopt.version = HTB_VER; 1220 gopt.rate2quantum = q->rate2quantum; 1052 gopt.rate2quantum = q->rate2quantum; 1221 gopt.defcls = q->defcls; 1053 gopt.defcls = q->defcls; 1222 gopt.debug = 0; 1054 gopt.debug = 0; 1223 1055 1224 nest = nla_nest_start_noflag(skb, TCA !! 1056 nest = nla_nest_start(skb, TCA_OPTIONS); 1225 if (nest == NULL) 1057 if (nest == NULL) 1226 goto nla_put_failure; 1058 goto nla_put_failure; 1227 if (nla_put(skb, TCA_HTB_INIT, sizeof 1059 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) || 1228 nla_put_u32(skb, TCA_HTB_DIRECT_Q 1060 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) 1229 goto nla_put_failure; 1061 goto nla_put_failure; 1230 if (q->offload && nla_put_flag(skb, T !! 1062 nla_nest_end(skb, nest); 1231 goto nla_put_failure; << 1232 1063 1233 return nla_nest_end(skb, nest); !! 1064 spin_unlock_bh(root_lock); >> 1065 return skb->len; 1234 1066 1235 nla_put_failure: 1067 nla_put_failure: >> 1068 spin_unlock_bh(root_lock); 1236 nla_nest_cancel(skb, nest); 1069 nla_nest_cancel(skb, nest); 1237 return -1; 1070 return -1; 1238 } 1071 } 1239 1072 1240 static int htb_dump_class(struct Qdisc *sch, 1073 static int htb_dump_class(struct Qdisc *sch, unsigned long arg, 1241 struct sk_buff *skb 1074 struct sk_buff *skb, struct tcmsg *tcm) 1242 { 1075 { 1243 struct htb_class *cl = (struct htb_cl 1076 struct htb_class *cl = (struct htb_class *)arg; 1244 struct htb_sched *q = qdisc_priv(sch) !! 1077 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch); 1245 struct nlattr *nest; 1078 struct nlattr *nest; 1246 struct tc_htb_opt opt; 1079 struct tc_htb_opt opt; 1247 1080 1248 /* Its safe to not acquire qdisc lock !! 1081 spin_lock_bh(root_lock); 1249 * no change can happen on the class << 1250 */ << 1251 tcm->tcm_parent = cl->parent ? cl->pa 1082 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; 1252 tcm->tcm_handle = cl->common.classid; 1083 tcm->tcm_handle = cl->common.classid; 1253 if (!cl->level && cl->leaf.q) !! 1084 if (!cl->level && cl->un.leaf.q) 1254 tcm->tcm_info = cl->leaf.q->h !! 1085 tcm->tcm_info = cl->un.leaf.q->handle; 1255 1086 1256 nest = nla_nest_start_noflag(skb, TCA !! 
1087 nest = nla_nest_start(skb, TCA_OPTIONS); 1257 if (nest == NULL) 1088 if (nest == NULL) 1258 goto nla_put_failure; 1089 goto nla_put_failure; 1259 1090 1260 memset(&opt, 0, sizeof(opt)); 1091 memset(&opt, 0, sizeof(opt)); 1261 1092 1262 psched_ratecfg_getrate(&opt.rate, &cl 1093 psched_ratecfg_getrate(&opt.rate, &cl->rate); 1263 opt.buffer = PSCHED_NS2TICKS(cl->buff 1094 opt.buffer = PSCHED_NS2TICKS(cl->buffer); 1264 psched_ratecfg_getrate(&opt.ceil, &cl 1095 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); 1265 opt.cbuffer = PSCHED_NS2TICKS(cl->cbu 1096 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); 1266 opt.quantum = cl->quantum; 1097 opt.quantum = cl->quantum; 1267 opt.prio = cl->prio; 1098 opt.prio = cl->prio; 1268 opt.level = cl->level; 1099 opt.level = cl->level; 1269 if (nla_put(skb, TCA_HTB_PARMS, sizeo 1100 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) 1270 goto nla_put_failure; 1101 goto nla_put_failure; 1271 if (q->offload && nla_put_flag(skb, T << 1272 goto nla_put_failure; << 1273 if ((cl->rate.rate_bytes_ps >= (1ULL << 1274 nla_put_u64_64bit(skb, TCA_HTB_RA << 1275 TCA_HTB_PAD)) << 1276 goto nla_put_failure; << 1277 if ((cl->ceil.rate_bytes_ps >= (1ULL << 1278 nla_put_u64_64bit(skb, TCA_HTB_CE << 1279 TCA_HTB_PAD)) << 1280 goto nla_put_failure; << 1281 1102 1282 return nla_nest_end(skb, nest); !! 1103 nla_nest_end(skb, nest); >> 1104 spin_unlock_bh(root_lock); >> 1105 return skb->len; 1283 1106 1284 nla_put_failure: 1107 nla_put_failure: >> 1108 spin_unlock_bh(root_lock); 1285 nla_nest_cancel(skb, nest); 1109 nla_nest_cancel(skb, nest); 1286 return -1; 1110 return -1; 1287 } 1111 } 1288 1112 1289 static void htb_offload_aggregate_stats(struc << 1290 struc << 1291 { << 1292 u64 bytes = 0, packets = 0; << 1293 struct htb_class *c; << 1294 unsigned int i; << 1295 << 1296 gnet_stats_basic_sync_init(&cl->bstat << 1297 << 1298 for (i = 0; i < q->clhash.hashsize; i << 1299 hlist_for_each_entry(c, &q->c << 1300 struct htb_class *p = << 1301 << 1302 while (p && p->level << 1303 p = p->parent << 1304 << 1305 if (p != cl) << 1306 continue; << 1307 << 1308 bytes += u64_stats_re << 1309 packets += u64_stats_ << 1310 if (c->level == 0) { << 1311 bytes += u64_ << 1312 packets += u6 << 1313 } << 1314 } << 1315 } << 1316 _bstats_update(&cl->bstats, bytes, pa << 1317 } << 1318 << 1319 static int 1113 static int 1320 htb_dump_class_stats(struct Qdisc *sch, unsig 1114 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) 1321 { 1115 { 1322 struct htb_class *cl = (struct htb_cl 1116 struct htb_class *cl = (struct htb_class *)arg; 1323 struct htb_sched *q = qdisc_priv(sch) << 1324 struct gnet_stats_queue qs = { << 1325 .drops = cl->drops, << 1326 .overlimits = cl->overlimits, << 1327 }; << 1328 __u32 qlen = 0; << 1329 << 1330 if (!cl->level && cl->leaf.q) << 1331 qdisc_qstats_qlen_backlog(cl- << 1332 << 1333 cl->xstats.tokens = clamp_t(s64, PSCH << 1334 INT_MIN, << 1335 cl->xstats.ctokens = clamp_t(s64, PSC << 1336 INT_MIN, << 1337 << 1338 if (q->offload) { << 1339 if (!cl->level) { << 1340 if (cl->leaf.q) << 1341 cl->bstats = << 1342 else << 1343 gnet_stats_ba << 1344 _bstats_update(&cl->b << 1345 u64_st << 1346 u64_st << 1347 } else { << 1348 htb_offload_aggregate << 1349 } << 1350 } << 1351 1117 1352 if (gnet_stats_copy_basic(d, NULL, &c !! 1118 if (!cl->level && cl->un.leaf.q) 1353 gnet_stats_copy_rate_est(d, &cl-> !! 1119 cl->qstats.qlen = cl->un.leaf.q->q.qlen; 1354 gnet_stats_copy_queue(d, NULL, &q !! 
1120 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); >> 1121 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); >> 1122 >> 1123 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || >> 1124 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || >> 1125 gnet_stats_copy_queue(d, &cl->qstats) < 0) 1355 return -1; 1126 return -1; 1356 1127 1357 return gnet_stats_copy_app(d, &cl->xs 1128 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); 1358 } 1129 } 1359 1130 1360 static struct netdev_queue * << 1361 htb_select_queue(struct Qdisc *sch, struct tc << 1362 { << 1363 struct net_device *dev = qdisc_dev(sc << 1364 struct tc_htb_qopt_offload offload_op << 1365 struct htb_sched *q = qdisc_priv(sch) << 1366 int err; << 1367 << 1368 if (!q->offload) << 1369 return sch->dev_queue; << 1370 << 1371 offload_opt = (struct tc_htb_qopt_off << 1372 .command = TC_HTB_LEAF_QUERY_ << 1373 .classid = TC_H_MIN(tcm->tcm_ << 1374 }; << 1375 err = htb_offload(dev, &offload_opt); << 1376 if (err || offload_opt.qid >= dev->nu << 1377 return NULL; << 1378 return netdev_get_tx_queue(dev, offlo << 1379 } << 1380 << 1381 static struct Qdisc * << 1382 htb_graft_helper(struct netdev_queue *dev_que << 1383 { << 1384 struct net_device *dev = dev_queue->d << 1385 struct Qdisc *old_q; << 1386 << 1387 if (dev->flags & IFF_UP) << 1388 dev_deactivate(dev); << 1389 old_q = dev_graft_qdisc(dev_queue, ne << 1390 if (new_q) << 1391 new_q->flags |= TCQ_F_ONETXQU << 1392 if (dev->flags & IFF_UP) << 1393 dev_activate(dev); << 1394 << 1395 return old_q; << 1396 } << 1397 << 1398 static struct netdev_queue *htb_offload_get_q << 1399 { << 1400 struct netdev_queue *queue; << 1401 << 1402 queue = cl->leaf.offload_queue; << 1403 if (!(cl->leaf.q->flags & TCQ_F_BUILT << 1404 WARN_ON(cl->leaf.q->dev_queue << 1405 << 1406 return queue; << 1407 } << 1408 << 1409 static void htb_offload_move_qdisc(struct Qdi << 1410 struct htb << 1411 { << 1412 struct netdev_queue *queue_old, *queu << 1413 struct net_device *dev = qdisc_dev(sc << 1414 << 1415 queue_old = htb_offload_get_queue(cl_ << 1416 queue_new = htb_offload_get_queue(cl_ << 1417 << 1418 if (!destroying) { << 1419 struct Qdisc *qdisc; << 1420 << 1421 if (dev->flags & IFF_UP) << 1422 dev_deactivate(dev); << 1423 qdisc = dev_graft_qdisc(queue << 1424 WARN_ON(qdisc != cl_old->leaf << 1425 } << 1426 << 1427 if (!(cl_old->leaf.q->flags & TCQ_F_B << 1428 cl_old->leaf.q->dev_queue = q << 1429 cl_old->leaf.offload_queue = queue_ne << 1430 << 1431 if (!destroying) { << 1432 struct Qdisc *qdisc; << 1433 << 1434 qdisc = dev_graft_qdisc(queue << 1435 if (dev->flags & IFF_UP) << 1436 dev_activate(dev); << 1437 WARN_ON(!(qdisc->flags & TCQ_ << 1438 } << 1439 } << 1440 << 1441 static int htb_graft(struct Qdisc *sch, unsig 1131 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, 1442 struct Qdisc **old, stru !! 1132 struct Qdisc **old) 1443 { 1133 { 1444 struct netdev_queue *dev_queue = sch- << 1445 struct htb_class *cl = (struct htb_cl 1134 struct htb_class *cl = (struct htb_class *)arg; 1446 struct htb_sched *q = qdisc_priv(sch) << 1447 struct Qdisc *old_q; << 1448 1135 1449 if (cl->level) 1136 if (cl->level) 1450 return -EINVAL; 1137 return -EINVAL; >> 1138 if (new == NULL && >> 1139 (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, >> 1140 cl->common.classid)) == NULL) >> 1141 return -ENOBUFS; 1451 1142 1452 if (q->offload) !! 1143 sch_tree_lock(sch); 1453 dev_queue = htb_offload_get_q !! 1144 *old = cl->un.leaf.q; 1454 !! 
1145 cl->un.leaf.q = new; 1455 if (!new) { !! 1146 if (*old != NULL) { 1456 new = qdisc_create_dflt(dev_q !! 1147 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); 1457 cl->c !! 1148 qdisc_reset(*old); 1458 if (!new) << 1459 return -ENOBUFS; << 1460 } << 1461 << 1462 if (q->offload) { << 1463 /* One ref for cl->leaf.q, th << 1464 qdisc_refcount_inc(new); << 1465 old_q = htb_graft_helper(dev_ << 1466 } << 1467 << 1468 *old = qdisc_replace(sch, new, &cl->l << 1469 << 1470 if (q->offload) { << 1471 WARN_ON(old_q != *old); << 1472 qdisc_put(old_q); << 1473 } 1149 } 1474 !! 1150 sch_tree_unlock(sch); 1475 return 0; 1151 return 0; 1476 } 1152 } 1477 1153 1478 static struct Qdisc *htb_leaf(struct Qdisc *s 1154 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) 1479 { 1155 { 1480 struct htb_class *cl = (struct htb_cl 1156 struct htb_class *cl = (struct htb_class *)arg; 1481 return !cl->level ? cl->leaf.q : NULL !! 1157 return !cl->level ? cl->un.leaf.q : NULL; 1482 } 1158 } 1483 1159 1484 static void htb_qlen_notify(struct Qdisc *sch 1160 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) 1485 { 1161 { 1486 struct htb_class *cl = (struct htb_cl 1162 struct htb_class *cl = (struct htb_class *)arg; 1487 1163 1488 htb_deactivate(qdisc_priv(sch), cl); !! 1164 if (cl->un.leaf.q->q.qlen == 0) >> 1165 htb_deactivate(qdisc_priv(sch), cl); >> 1166 } >> 1167 >> 1168 static unsigned long htb_get(struct Qdisc *sch, u32 classid) >> 1169 { >> 1170 struct htb_class *cl = htb_find(classid, sch); >> 1171 if (cl) >> 1172 cl->refcnt++; >> 1173 return (unsigned long)cl; 1489 } 1174 } 1490 1175 1491 static inline int htb_parent_last_child(struc 1176 static inline int htb_parent_last_child(struct htb_class *cl) 1492 { 1177 { 1493 if (!cl->parent) 1178 if (!cl->parent) 1494 /* the root class */ 1179 /* the root class */ 1495 return 0; 1180 return 0; 1496 if (cl->parent->children > 1) 1181 if (cl->parent->children > 1) 1497 /* not the last child */ 1182 /* not the last child */ 1498 return 0; 1183 return 0; 1499 return 1; 1184 return 1; 1500 } 1185 } 1501 1186 1502 static void htb_parent_to_leaf(struct Qdisc * !! 1187 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, 1503 struct Qdisc * 1188 struct Qdisc *new_q) 1504 { 1189 { 1505 struct htb_sched *q = qdisc_priv(sch) << 1506 struct htb_class *parent = cl->parent 1190 struct htb_class *parent = cl->parent; 1507 1191 1508 WARN_ON(cl->level || !cl->leaf.q || c !! 1192 WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity); 1509 1193 1510 if (parent->cmode != HTB_CAN_SEND) 1194 if (parent->cmode != HTB_CAN_SEND) 1511 htb_safe_rb_erase(&parent->pq !! 1195 htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level); 1512 &q->hlevel[ << 1513 1196 1514 parent->level = 0; 1197 parent->level = 0; 1515 memset(&parent->inner, 0, sizeof(pare !! 1198 memset(&parent->un.inner, 0, sizeof(parent->un.inner)); 1516 parent->leaf.q = new_q ? new_q : &noo !! 1199 INIT_LIST_HEAD(&parent->un.leaf.drop_list); >> 1200 parent->un.leaf.q = new_q ? new_q : &noop_qdisc; 1517 parent->tokens = parent->buffer; 1201 parent->tokens = parent->buffer; 1518 parent->ctokens = parent->cbuffer; 1202 parent->ctokens = parent->cbuffer; 1519 parent->t_c = ktime_get_ns(); !! 
1203 parent->t_c = ktime_to_ns(ktime_get()); 1520 parent->cmode = HTB_CAN_SEND; 1204 parent->cmode = HTB_CAN_SEND; 1521 if (q->offload) << 1522 parent->leaf.offload_queue = << 1523 } << 1524 << 1525 static void htb_parent_to_leaf_offload(struct << 1526 struct << 1527 struct << 1528 { << 1529 struct Qdisc *old_q; << 1530 << 1531 /* One ref for cl->leaf.q, the other << 1532 if (new_q) << 1533 qdisc_refcount_inc(new_q); << 1534 old_q = htb_graft_helper(dev_queue, n << 1535 WARN_ON(!(old_q->flags & TCQ_F_BUILTI << 1536 } << 1537 << 1538 static int htb_destroy_class_offload(struct Q << 1539 bool las << 1540 struct n << 1541 { << 1542 struct tc_htb_qopt_offload offload_op << 1543 struct netdev_queue *dev_queue; << 1544 struct Qdisc *q = cl->leaf.q; << 1545 struct Qdisc *old; << 1546 int err; << 1547 << 1548 if (cl->level) << 1549 return -EINVAL; << 1550 << 1551 WARN_ON(!q); << 1552 dev_queue = htb_offload_get_queue(cl) << 1553 /* When destroying, caller qdisc_graf << 1554 * qdisc_put for the qdisc being dest << 1555 * does not need to graft or qdisc_pu << 1556 */ << 1557 if (!destroying) { << 1558 old = htb_graft_helper(dev_qu << 1559 /* Last qdisc grafted should << 1560 * calling htb_delete. << 1561 */ << 1562 WARN_ON(old != q); << 1563 } << 1564 << 1565 if (cl->parent) { << 1566 _bstats_update(&cl->parent->b << 1567 u64_stats_read << 1568 u64_stats_read << 1569 } << 1570 << 1571 offload_opt = (struct tc_htb_qopt_off << 1572 .command = !last_child ? TC_H << 1573 destroying ? TC_HT << 1574 TC_HTB_LEAF_DEL_LA << 1575 .classid = cl->common.classid << 1576 .extack = extack, << 1577 }; << 1578 err = htb_offload(qdisc_dev(sch), &of << 1579 << 1580 if (!destroying) { << 1581 if (!err) << 1582 qdisc_put(old); << 1583 else << 1584 htb_graft_helper(dev_ << 1585 } << 1586 << 1587 if (last_child) << 1588 return err; << 1589 << 1590 if (!err && offload_opt.classid != TC << 1591 u32 classid = TC_H_MAJ(sch->h << 1592 TC_H_MIN(offloa << 1593 struct htb_class *moved_cl = << 1594 << 1595 htb_offload_move_qdisc(sch, m << 1596 } << 1597 << 1598 return err; << 1599 } 1205 } 1600 1206 1601 static void htb_destroy_class(struct Qdisc *s 1207 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) 1602 { 1208 { 1603 if (!cl->level) { 1209 if (!cl->level) { 1604 WARN_ON(!cl->leaf.q); !! 1210 WARN_ON(!cl->un.leaf.q); 1605 qdisc_put(cl->leaf.q); !! 1211 qdisc_destroy(cl->un.leaf.q); 1606 } 1212 } 1607 gen_kill_estimator(&cl->rate_est); !! 1213 gen_kill_estimator(&cl->bstats, &cl->rate_est); 1608 tcf_block_put(cl->block); !! 1214 tcf_destroy_chain(&cl->filter_list); 1609 kfree(cl); 1215 kfree(cl); 1610 } 1216 } 1611 1217 1612 static void htb_destroy(struct Qdisc *sch) 1218 static void htb_destroy(struct Qdisc *sch) 1613 { 1219 { 1614 struct net_device *dev = qdisc_dev(sc << 1615 struct tc_htb_qopt_offload offload_op << 1616 struct htb_sched *q = qdisc_priv(sch) 1220 struct htb_sched *q = qdisc_priv(sch); 1617 struct hlist_node *next; 1221 struct hlist_node *next; 1618 bool nonempty, changed; << 1619 struct htb_class *cl; 1222 struct htb_class *cl; 1620 unsigned int i; 1223 unsigned int i; 1621 1224 1622 cancel_work_sync(&q->work); 1225 cancel_work_sync(&q->work); 1623 qdisc_watchdog_cancel(&q->watchdog); 1226 qdisc_watchdog_cancel(&q->watchdog); 1624 /* This line used to be after htb_des 1227 /* This line used to be after htb_destroy_class call below 1625 * and surprisingly it worked in 2.4. 1228 * and surprisingly it worked in 2.4. 
But it must precede it 1626 * because filter need its target cla 1229 * because filter need its target class alive to be able to call 1627 * unbind_filter on it (without Oops) 1230 * unbind_filter on it (without Oops). 1628 */ 1231 */ 1629 tcf_block_put(q->block); !! 1232 tcf_destroy_chain(&q->filter_list); 1630 1233 1631 for (i = 0; i < q->clhash.hashsize; i 1234 for (i = 0; i < q->clhash.hashsize; i++) { 1632 hlist_for_each_entry(cl, &q-> !! 1235 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) 1633 tcf_block_put(cl->blo !! 1236 tcf_destroy_chain(&cl->filter_list); 1634 cl->block = NULL; << 1635 } << 1636 } 1237 } 1637 !! 1238 for (i = 0; i < q->clhash.hashsize; i++) { 1638 do { !! 1239 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], 1639 nonempty = false; !! 1240 common.hnode) 1640 changed = false; !! 1241 htb_destroy_class(sch, cl); 1641 for (i = 0; i < q->clhash.has << 1642 hlist_for_each_entry_ << 1643 << 1644 bool last_chi << 1645 << 1646 if (!q->offlo << 1647 htb_d << 1648 conti << 1649 } << 1650 << 1651 nonempty = tr << 1652 << 1653 if (cl->level << 1654 conti << 1655 << 1656 changed = tru << 1657 << 1658 last_child = << 1659 htb_destroy_c << 1660 << 1661 qdisc_class_h << 1662 << 1663 if (cl->paren << 1664 cl->p << 1665 if (last_chil << 1666 htb_p << 1667 htb_destroy_c << 1668 } << 1669 } << 1670 } while (changed); << 1671 WARN_ON(nonempty); << 1672 << 1673 qdisc_class_hash_destroy(&q->clhash); << 1674 __qdisc_reset_queue(&q->direct_queue) << 1675 << 1676 if (q->offload) { << 1677 offload_opt = (struct tc_htb_ << 1678 .command = TC_HTB_DES << 1679 }; << 1680 htb_offload(dev, &offload_opt << 1681 } 1242 } 1682 !! 1243 qdisc_class_hash_destroy(&q->clhash); 1683 if (!q->direct_qdiscs) !! 1244 __skb_queue_purge(&q->direct_queue); 1684 return; << 1685 for (i = 0; i < q->num_direct_qdiscs << 1686 qdisc_put(q->direct_qdiscs[i] << 1687 kfree(q->direct_qdiscs); << 1688 } 1245 } 1689 1246 1690 static int htb_delete(struct Qdisc *sch, unsi !! 1247 static int htb_delete(struct Qdisc *sch, unsigned long arg) 1691 struct netlink_ext_ack << 1692 { 1248 { 1693 struct htb_sched *q = qdisc_priv(sch) 1249 struct htb_sched *q = qdisc_priv(sch); 1694 struct htb_class *cl = (struct htb_cl 1250 struct htb_class *cl = (struct htb_class *)arg; >> 1251 unsigned int qlen; 1695 struct Qdisc *new_q = NULL; 1252 struct Qdisc *new_q = NULL; 1696 int last_child = 0; 1253 int last_child = 0; 1697 int err; << 1698 1254 1699 /* TODO: why don't allow to delete su !! 1255 // TODO: why don't allow to delete subtree ? references ? does 1700 * tc subsys guarantee us that in htb !! 1256 // tc subsys quarantee us that in htb_destroy it holds no class 1701 * refs so that we can remove childre !! 1257 // refs so that we can remove children safely there ? 1702 */ !! 1258 if (cl->children || cl->filter_cnt) 1703 if (cl->children || qdisc_class_in_us << 1704 NL_SET_ERR_MSG(extack, "HTB c << 1705 return -EBUSY; 1259 return -EBUSY; 1706 } << 1707 1260 1708 if (!cl->level && htb_parent_last_chi !! 
1261 if (!cl->level && htb_parent_last_child(cl)) { >> 1262 new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, >> 1263 cl->parent->common.classid); 1709 last_child = 1; 1264 last_child = 1; 1710 << 1711 if (q->offload) { << 1712 err = htb_destroy_class_offlo << 1713 << 1714 if (err) << 1715 return err; << 1716 } << 1717 << 1718 if (last_child) { << 1719 struct netdev_queue *dev_queu << 1720 << 1721 if (q->offload) << 1722 dev_queue = htb_offlo << 1723 << 1724 new_q = qdisc_create_dflt(dev << 1725 cl- << 1726 NUL << 1727 if (q->offload) << 1728 htb_parent_to_leaf_of << 1729 } 1265 } 1730 1266 1731 sch_tree_lock(sch); 1267 sch_tree_lock(sch); 1732 1268 1733 if (!cl->level) !! 1269 if (!cl->level) { 1734 qdisc_purge_queue(cl->leaf.q) !! 1270 qlen = cl->un.leaf.q->q.qlen; >> 1271 qdisc_reset(cl->un.leaf.q); >> 1272 qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen); >> 1273 } 1735 1274 1736 /* delete from hash and active; remai 1275 /* delete from hash and active; remainder in destroy_class */ 1737 qdisc_class_hash_remove(&q->clhash, & 1276 qdisc_class_hash_remove(&q->clhash, &cl->common); 1738 if (cl->parent) 1277 if (cl->parent) 1739 cl->parent->children--; 1278 cl->parent->children--; 1740 1279 1741 if (cl->prio_activity) 1280 if (cl->prio_activity) 1742 htb_deactivate(q, cl); 1281 htb_deactivate(q, cl); 1743 1282 1744 if (cl->cmode != HTB_CAN_SEND) 1283 if (cl->cmode != HTB_CAN_SEND) 1745 htb_safe_rb_erase(&cl->pq_nod !! 1284 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); 1746 &q->hlevel[ << 1747 1285 1748 if (last_child) 1286 if (last_child) 1749 htb_parent_to_leaf(sch, cl, n !! 1287 htb_parent_to_leaf(q, cl, new_q); 1750 1288 1751 sch_tree_unlock(sch); !! 1289 BUG_ON(--cl->refcnt == 0); >> 1290 /* >> 1291 * This shouldn't happen: we "hold" one cops->get() when called >> 1292 * from tc_ctl_tclass; the destroy method is done from cops->put(). >> 1293 */ 1752 1294 1753 htb_destroy_class(sch, cl); !! 1295 sch_tree_unlock(sch); 1754 return 0; 1296 return 0; 1755 } 1297 } 1756 1298 >> 1299 static void htb_put(struct Qdisc *sch, unsigned long arg) >> 1300 { >> 1301 struct htb_class *cl = (struct htb_class *)arg; >> 1302 >> 1303 if (--cl->refcnt == 0) >> 1304 htb_destroy_class(sch, cl); >> 1305 } >> 1306 1757 static int htb_change_class(struct Qdisc *sch 1307 static int htb_change_class(struct Qdisc *sch, u32 classid, 1758 u32 parentid, str 1308 u32 parentid, struct nlattr **tca, 1759 unsigned long *ar !! 1309 unsigned long *arg) 1760 { 1310 { 1761 int err = -EINVAL; 1311 int err = -EINVAL; 1762 struct htb_sched *q = qdisc_priv(sch) 1312 struct htb_sched *q = qdisc_priv(sch); 1763 struct htb_class *cl = (struct htb_cl 1313 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1764 struct tc_htb_qopt_offload offload_op << 1765 struct nlattr *opt = tca[TCA_OPTIONS] 1314 struct nlattr *opt = tca[TCA_OPTIONS]; >> 1315 struct qdisc_rate_table *rtab = NULL, *ctab = NULL; 1766 struct nlattr *tb[TCA_HTB_MAX + 1]; 1316 struct nlattr *tb[TCA_HTB_MAX + 1]; 1767 struct Qdisc *parent_qdisc = NULL; << 1768 struct netdev_queue *dev_queue; << 1769 struct tc_htb_opt *hopt; 1317 struct tc_htb_opt *hopt; 1770 u64 rate64, ceil64; << 1771 int warn = 0; << 1772 1318 1773 /* extract all subattrs from opt attr 1319 /* extract all subattrs from opt attr */ 1774 if (!opt) 1320 if (!opt) 1775 goto failure; 1321 goto failure; 1776 1322 1777 err = nla_parse_nested_deprecated(tb, !! 
1323 err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy); 1778 ext << 1779 if (err < 0) 1324 if (err < 0) 1780 goto failure; 1325 goto failure; 1781 1326 1782 err = -EINVAL; 1327 err = -EINVAL; 1783 if (tb[TCA_HTB_PARMS] == NULL) 1328 if (tb[TCA_HTB_PARMS] == NULL) 1784 goto failure; 1329 goto failure; 1785 1330 1786 parent = parentid == TC_H_ROOT ? NULL 1331 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); 1787 1332 1788 hopt = nla_data(tb[TCA_HTB_PARMS]); 1333 hopt = nla_data(tb[TCA_HTB_PARMS]); 1789 if (!hopt->rate.rate || !hopt->ceil.r 1334 if (!hopt->rate.rate || !hopt->ceil.rate) 1790 goto failure; 1335 goto failure; 1791 1336 1792 if (q->offload) { << 1793 /* Options not supported by t << 1794 if (hopt->rate.overhead || ho << 1795 NL_SET_ERR_MSG(extack << 1796 goto failure; << 1797 } << 1798 if (hopt->rate.mpu || hopt->c << 1799 NL_SET_ERR_MSG(extack << 1800 goto failure; << 1801 } << 1802 } << 1803 << 1804 /* Keeping backward compatible with r 1337 /* Keeping backward compatible with rate_table based iproute2 tc */ 1805 if (hopt->rate.linklayer == TC_LINKLA !! 1338 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { 1806 qdisc_put_rtab(qdisc_get_rtab !! 1339 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); 1807 !! 1340 if (rtab) 1808 !! 1341 qdisc_put_rtab(rtab); 1809 if (hopt->ceil.linklayer == TC_LINKLA !! 1342 } 1810 qdisc_put_rtab(qdisc_get_rtab !! 1343 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { 1811 !! 1344 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); 1812 !! 1345 if (ctab) 1813 rate64 = tb[TCA_HTB_RATE64] ? nla_get !! 1346 qdisc_put_rtab(ctab); 1814 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get !! 1347 } 1815 1348 1816 if (!cl) { /* new class 1349 if (!cl) { /* new class */ 1817 struct net_device *dev = qdis !! 1350 struct Qdisc *new_q; 1818 struct Qdisc *new_q, *old_q; << 1819 int prio; 1351 int prio; 1820 struct { 1352 struct { 1821 struct nlattr 1353 struct nlattr nla; 1822 struct gnet_estimator 1354 struct gnet_estimator opt; 1823 } est = { 1355 } est = { 1824 .nla = { 1356 .nla = { 1825 .nla_len 1357 .nla_len = nla_attr_size(sizeof(est.opt)), 1826 .nla_type 1358 .nla_type = TCA_RATE, 1827 }, 1359 }, 1828 .opt = { 1360 .opt = { 1829 /* 4s interva 1361 /* 4s interval, 16s averaging constant */ 1830 .interval 1362 .interval = 2, 1831 .ewma_log 1363 .ewma_log = 2, 1832 }, 1364 }, 1833 }; 1365 }; 1834 1366 1835 /* check for valid classid */ 1367 /* check for valid classid */ 1836 if (!classid || TC_H_MAJ(clas 1368 if (!classid || TC_H_MAJ(classid ^ sch->handle) || 1837 htb_find(classid, sch)) 1369 htb_find(classid, sch)) 1838 goto failure; 1370 goto failure; 1839 1371 1840 /* check maximal depth */ 1372 /* check maximal depth */ 1841 if (parent && parent->parent 1373 if (parent && parent->parent && parent->parent->level < 2) { 1842 NL_SET_ERR_MSG_MOD(ex !! 1374 pr_err("htb: tree is too deep\n"); 1843 goto failure; 1375 goto failure; 1844 } 1376 } 1845 err = -ENOBUFS; 1377 err = -ENOBUFS; 1846 cl = kzalloc(sizeof(*cl), GFP 1378 cl = kzalloc(sizeof(*cl), GFP_KERNEL); 1847 if (!cl) 1379 if (!cl) 1848 goto failure; 1380 goto failure; 1849 1381 1850 gnet_stats_basic_sync_init(&c !! 1382 err = gen_new_estimator(&cl->bstats, &cl->rate_est, 1851 gnet_stats_basic_sync_init(&c !! 1383 qdisc_root_sleeping_lock(sch), 1852 !! 1384 tca[TCA_RATE] ? 
: &est.nla); 1853 err = tcf_block_get(&cl->bloc << 1854 if (err) { 1385 if (err) { 1855 kfree(cl); 1386 kfree(cl); 1856 goto failure; 1387 goto failure; 1857 } 1388 } 1858 if (htb_rate_est || tca[TCA_R << 1859 err = gen_new_estimat << 1860 << 1861 << 1862 << 1863 << 1864 if (err) << 1865 goto err_bloc << 1866 } << 1867 1389 >> 1390 cl->refcnt = 1; 1868 cl->children = 0; 1391 cl->children = 0; >> 1392 INIT_LIST_HEAD(&cl->un.leaf.drop_list); 1869 RB_CLEAR_NODE(&cl->pq_node); 1393 RB_CLEAR_NODE(&cl->pq_node); 1870 1394 1871 for (prio = 0; prio < TC_HTB_ 1395 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) 1872 RB_CLEAR_NODE(&cl->no 1396 RB_CLEAR_NODE(&cl->node[prio]); 1873 1397 1874 cl->common.classid = classid; << 1875 << 1876 /* Make sure nothing interrup << 1877 * ndo_setup_tc calls. << 1878 */ << 1879 ASSERT_RTNL(); << 1880 << 1881 /* create leaf qdisc early be 1398 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) 1882 * so that can't be used insi 1399 * so that can't be used inside of sch_tree_lock 1883 * -- thanks to Karlis Peisen 1400 * -- thanks to Karlis Peisenieks 1884 */ 1401 */ 1885 if (!q->offload) { !! 1402 new_q = qdisc_create_dflt(sch->dev_queue, 1886 dev_queue = sch->dev_ !! 1403 &pfifo_qdisc_ops, classid); 1887 } else if (!(parent && !paren << 1888 /* Assign a dev_queue << 1889 offload_opt = (struct << 1890 .command = TC << 1891 .classid = cl << 1892 .parent_class << 1893 TC_H_ << 1894 TC_HT << 1895 .rate = max_t << 1896 .ceil = max_t << 1897 .prio = hopt- << 1898 .quantum = ho << 1899 .extack = ext << 1900 }; << 1901 err = htb_offload(dev << 1902 if (err) { << 1903 NL_SET_ERR_MS << 1904 << 1905 goto err_kill << 1906 } << 1907 dev_queue = netdev_ge << 1908 } else { /* First child. */ << 1909 dev_queue = htb_offlo << 1910 old_q = htb_graft_hel << 1911 WARN_ON(old_q != pare << 1912 offload_opt = (struct << 1913 .command = TC << 1914 .classid = cl << 1915 .parent_class << 1916 TC_H_ << 1917 .rate = max_t << 1918 .ceil = max_t << 1919 .prio = hopt- << 1920 .quantum = ho << 1921 .extack = ext << 1922 }; << 1923 err = htb_offload(dev << 1924 if (err) { << 1925 NL_SET_ERR_MS << 1926 << 1927 htb_graft_hel << 1928 goto err_kill << 1929 } << 1930 _bstats_update(&paren << 1931 u64_st << 1932 u64_st << 1933 qdisc_put(old_q); << 1934 } << 1935 new_q = qdisc_create_dflt(dev << 1936 cla << 1937 if (q->offload) { << 1938 /* One ref for cl->le << 1939 if (new_q) << 1940 qdisc_refcoun << 1941 old_q = htb_graft_hel << 1942 /* No qdisc_put neede << 1943 WARN_ON(!(old_q->flag << 1944 } << 1945 sch_tree_lock(sch); 1404 sch_tree_lock(sch); 1946 if (parent && !parent->level) 1405 if (parent && !parent->level) { >> 1406 unsigned int qlen = parent->un.leaf.q->q.qlen; >> 1407 1947 /* turn parent into i 1408 /* turn parent into inner node */ 1948 qdisc_purge_queue(par !! 1409 qdisc_reset(parent->un.leaf.q); 1949 parent_qdisc = parent !! 1410 qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen); >> 1411 qdisc_destroy(parent->un.leaf.q); 1950 if (parent->prio_acti 1412 if (parent->prio_activity) 1951 htb_deactivat 1413 htb_deactivate(q, parent); 1952 1414 1953 /* remove from evt li 1415 /* remove from evt list because of level change */ 1954 if (parent->cmode != 1416 if (parent->cmode != HTB_CAN_SEND) { 1955 htb_safe_rb_e !! 1417 htb_safe_rb_erase(&parent->pq_node, q->wait_pq); 1956 parent->cmode 1418 parent->cmode = HTB_CAN_SEND; 1957 } 1419 } 1958 parent->level = (pare 1420 parent->level = (parent->parent ? 
parent->parent->level 1959 : TC 1421 : TC_HTB_MAXDEPTH) - 1; 1960 memset(&parent->inner !! 1422 memset(&parent->un.inner, 0, sizeof(parent->un.inner)); 1961 } 1423 } 1962 << 1963 /* leaf (we) needs elementary 1424 /* leaf (we) needs elementary qdisc */ 1964 cl->leaf.q = new_q ? new_q : !! 1425 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; 1965 if (q->offload) << 1966 cl->leaf.offload_queu << 1967 1426 >> 1427 cl->common.classid = classid; 1968 cl->parent = parent; 1428 cl->parent = parent; 1969 1429 1970 /* set class to be in HTB_CAN 1430 /* set class to be in HTB_CAN_SEND state */ 1971 cl->tokens = PSCHED_TICKS2NS( 1431 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); 1972 cl->ctokens = PSCHED_TICKS2NS 1432 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); 1973 cl->mbuffer = 60ULL * NSEC_PE 1433 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ 1974 cl->t_c = ktime_get_ns(); !! 1434 cl->t_c = ktime_to_ns(ktime_get()); 1975 cl->cmode = HTB_CAN_SEND; 1435 cl->cmode = HTB_CAN_SEND; 1976 1436 1977 /* attach to the hash list an 1437 /* attach to the hash list and parent's family */ 1978 qdisc_class_hash_insert(&q->c 1438 qdisc_class_hash_insert(&q->clhash, &cl->common); 1979 if (parent) 1439 if (parent) 1980 parent->children++; 1440 parent->children++; 1981 if (cl->leaf.q != &noop_qdisc << 1982 qdisc_hash_add(cl->le << 1983 } else { 1441 } else { 1984 if (tca[TCA_RATE]) { 1442 if (tca[TCA_RATE]) { 1985 err = gen_replace_est !! 1443 err = gen_replace_estimator(&cl->bstats, &cl->rate_est, 1986 !! 1444 qdisc_root_sleeping_lock(sch), 1987 << 1988 << 1989 1445 tca[TCA_RATE]); 1990 if (err) 1446 if (err) 1991 return err; 1447 return err; 1992 } 1448 } 1993 << 1994 if (q->offload) { << 1995 struct net_device *de << 1996 << 1997 offload_opt = (struct << 1998 .command = TC << 1999 .classid = cl << 2000 .rate = max_t << 2001 .ceil = max_t << 2002 .prio = hopt- << 2003 .quantum = ho << 2004 .extack = ext << 2005 }; << 2006 err = htb_offload(dev << 2007 if (err) << 2008 /* Estimator << 2009 * as well, s << 2010 * the estima << 2011 * offload an << 2012 * only when << 2013 */ << 2014 return err; << 2015 } << 2016 << 2017 sch_tree_lock(sch); 1449 sch_tree_lock(sch); 2018 } 1450 } 2019 1451 2020 psched_ratecfg_precompute(&cl->rate, << 2021 psched_ratecfg_precompute(&cl->ceil, << 2022 << 2023 /* it used to be a nasty bug here, we 1452 /* it used to be a nasty bug here, we have to check that node 2024 * is really leaf before changing cl- !! 1453 * is really leaf before changing cl->un.leaf ! 2025 */ 1454 */ 2026 if (!cl->level) { 1455 if (!cl->level) { 2027 u64 quantum = cl->rate.rate_b !! 1456 cl->quantum = hopt->rate.rate / q->rate2quantum; 2028 << 2029 do_div(quantum, q->rate2quant << 2030 cl->quantum = min_t(u64, quan << 2031 << 2032 if (!hopt->quantum && cl->qua 1457 if (!hopt->quantum && cl->quantum < 1000) { 2033 warn = -1; !! 1458 pr_warning( >> 1459 "HTB: quantum of class %X is small. Consider r2q change.\n", >> 1460 cl->common.classid); 2034 cl->quantum = 1000; 1461 cl->quantum = 1000; 2035 } 1462 } 2036 if (!hopt->quantum && cl->qua 1463 if (!hopt->quantum && cl->quantum > 200000) { 2037 warn = 1; !! 1464 pr_warning( >> 1465 "HTB: quantum of class %X is big. 
Consider r2q change.\n", >> 1466 cl->common.classid); 2038 cl->quantum = 200000; 1467 cl->quantum = 200000; 2039 } 1468 } 2040 if (hopt->quantum) 1469 if (hopt->quantum) 2041 cl->quantum = hopt->q 1470 cl->quantum = hopt->quantum; 2042 if ((cl->prio = hopt->prio) > 1471 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) 2043 cl->prio = TC_HTB_NUM 1472 cl->prio = TC_HTB_NUMPRIO - 1; 2044 } 1473 } 2045 1474 >> 1475 psched_ratecfg_precompute(&cl->rate, &hopt->rate); >> 1476 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil); >> 1477 2046 cl->buffer = PSCHED_TICKS2NS(hopt->bu 1478 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); 2047 cl->cbuffer = PSCHED_TICKS2NS(hopt->c 1479 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); 2048 1480 2049 sch_tree_unlock(sch); 1481 sch_tree_unlock(sch); 2050 qdisc_put(parent_qdisc); << 2051 << 2052 if (warn) << 2053 NL_SET_ERR_MSG_FMT_MOD(extack << 2054 "quant << 2055 cl->co << 2056 1482 2057 qdisc_class_hash_grow(sch, &q->clhash 1483 qdisc_class_hash_grow(sch, &q->clhash); 2058 1484 2059 *arg = (unsigned long)cl; 1485 *arg = (unsigned long)cl; 2060 return 0; 1486 return 0; 2061 1487 2062 err_kill_estimator: << 2063 gen_kill_estimator(&cl->rate_est); << 2064 err_block_put: << 2065 tcf_block_put(cl->block); << 2066 kfree(cl); << 2067 failure: 1488 failure: 2068 return err; 1489 return err; 2069 } 1490 } 2070 1491 2071 static struct tcf_block *htb_tcf_block(struct !! 1492 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) 2072 struct << 2073 { 1493 { 2074 struct htb_sched *q = qdisc_priv(sch) 1494 struct htb_sched *q = qdisc_priv(sch); 2075 struct htb_class *cl = (struct htb_cl 1495 struct htb_class *cl = (struct htb_class *)arg; >> 1496 struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list; 2076 1497 2077 return cl ? cl->block : q->block; !! 1498 return fl; 2078 } 1499 } 2079 1500 2080 static unsigned long htb_bind_filter(struct Q 1501 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, 2081 u32 clas 1502 u32 classid) 2082 { 1503 { 2083 struct htb_class *cl = htb_find(class 1504 struct htb_class *cl = htb_find(classid, sch); 2084 1505 2085 /*if (cl && !cl->level) return 0; 1506 /*if (cl && !cl->level) return 0; 2086 * The line above used to be there to 1507 * The line above used to be there to prevent attaching filters to 2087 * leaves. But at least tc_index filt 1508 * leaves. But at least tc_index filter uses this just to get class 2088 * for other reasons so that we have 1509 * for other reasons so that we have to allow for it. 2089 * ---- 1510 * ---- 2090 * 19.6.2002 As Werner explained it i 1511 * 19.6.2002 As Werner explained it is ok - bind filter is just 2091 * another way to "lock" the class - 1512 * another way to "lock" the class - unlike "get" this lock can 2092 * be broken by class during destroy 1513 * be broken by class during destroy IIUC. 2093 */ 1514 */ 2094 if (cl) 1515 if (cl) 2095 qdisc_class_get(&cl->common); !! 1516 cl->filter_cnt++; 2096 return (unsigned long)cl; 1517 return (unsigned long)cl; 2097 } 1518 } 2098 1519 2099 static void htb_unbind_filter(struct Qdisc *s 1520 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) 2100 { 1521 { 2101 struct htb_class *cl = (struct htb_cl 1522 struct htb_class *cl = (struct htb_class *)arg; 2102 1523 2103 qdisc_class_put(&cl->common); !! 
1524 if (cl) >> 1525 cl->filter_cnt--; 2104 } 1526 } 2105 1527 2106 static void htb_walk(struct Qdisc *sch, struc 1528 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) 2107 { 1529 { 2108 struct htb_sched *q = qdisc_priv(sch) 1530 struct htb_sched *q = qdisc_priv(sch); 2109 struct htb_class *cl; 1531 struct htb_class *cl; 2110 unsigned int i; 1532 unsigned int i; 2111 1533 2112 if (arg->stop) 1534 if (arg->stop) 2113 return; 1535 return; 2114 1536 2115 for (i = 0; i < q->clhash.hashsize; i 1537 for (i = 0; i < q->clhash.hashsize; i++) { 2116 hlist_for_each_entry(cl, &q-> 1538 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { 2117 if (!tc_qdisc_stats_d !! 1539 if (arg->count < arg->skip) { >> 1540 arg->count++; >> 1541 continue; >> 1542 } >> 1543 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { >> 1544 arg->stop = 1; 2118 return; 1545 return; >> 1546 } >> 1547 arg->count++; 2119 } 1548 } 2120 } 1549 } 2121 } 1550 } 2122 1551 2123 static const struct Qdisc_class_ops htb_class 1552 static const struct Qdisc_class_ops htb_class_ops = { 2124 .select_queue = htb_select_qu << 2125 .graft = htb_graft, 1553 .graft = htb_graft, 2126 .leaf = htb_leaf, 1554 .leaf = htb_leaf, 2127 .qlen_notify = htb_qlen_noti 1555 .qlen_notify = htb_qlen_notify, 2128 .find = htb_search, !! 1556 .get = htb_get, >> 1557 .put = htb_put, 2129 .change = htb_change_cl 1558 .change = htb_change_class, 2130 .delete = htb_delete, 1559 .delete = htb_delete, 2131 .walk = htb_walk, 1560 .walk = htb_walk, 2132 .tcf_block = htb_tcf_block !! 1561 .tcf_chain = htb_find_tcf, 2133 .bind_tcf = htb_bind_filt 1562 .bind_tcf = htb_bind_filter, 2134 .unbind_tcf = htb_unbind_fi 1563 .unbind_tcf = htb_unbind_filter, 2135 .dump = htb_dump_clas 1564 .dump = htb_dump_class, 2136 .dump_stats = htb_dump_clas 1565 .dump_stats = htb_dump_class_stats, 2137 }; 1566 }; 2138 1567 2139 static struct Qdisc_ops htb_qdisc_ops __read_ 1568 static struct Qdisc_ops htb_qdisc_ops __read_mostly = { 2140 .cl_ops = &htb_class_op 1569 .cl_ops = &htb_class_ops, 2141 .id = "htb", 1570 .id = "htb", 2142 .priv_size = sizeof(struct 1571 .priv_size = sizeof(struct htb_sched), 2143 .enqueue = htb_enqueue, 1572 .enqueue = htb_enqueue, 2144 .dequeue = htb_dequeue, 1573 .dequeue = htb_dequeue, 2145 .peek = qdisc_peek_de 1574 .peek = qdisc_peek_dequeued, >> 1575 .drop = htb_drop, 2146 .init = htb_init, 1576 .init = htb_init, 2147 .attach = htb_attach, << 2148 .reset = htb_reset, 1577 .reset = htb_reset, 2149 .destroy = htb_destroy, 1578 .destroy = htb_destroy, 2150 .dump = htb_dump, 1579 .dump = htb_dump, 2151 .owner = THIS_MODULE, 1580 .owner = THIS_MODULE, 2152 }; 1581 }; 2153 MODULE_ALIAS_NET_SCH("htb"); << 2154 1582 2155 static int __init htb_module_init(void) 1583 static int __init htb_module_init(void) 2156 { 1584 { 2157 return register_qdisc(&htb_qdisc_ops) 1585 return register_qdisc(&htb_qdisc_ops); 2158 } 1586 } 2159 static void __exit htb_module_exit(void) 1587 static void __exit htb_module_exit(void) 2160 { 1588 { 2161 unregister_qdisc(&htb_qdisc_ops); 1589 unregister_qdisc(&htb_qdisc_ops); 2162 } 1590 } 2163 1591 2164 module_init(htb_module_init) 1592 module_init(htb_module_init) 2165 module_exit(htb_module_exit) 1593 module_exit(htb_module_exit) 2166 MODULE_LICENSE("GPL"); 1594 MODULE_LICENSE("GPL"); 2167 MODULE_DESCRIPTION("Hierarchical Token Bucket << 2168 1595
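Two spots in the listing above are easy to lose in the diff noise: the leaf-quantum fix-up near the end of htb_change_class(), and the last-child transition handled by htb_parent_last_child()/htb_parent_to_leaf(). The stand-alone user-space sketch below restates both under stated assumptions: compute_quantum(), struct toy_class and the TOY_* limits are illustrative stand-ins, not kernel APIs, and rates are taken in bytes per second as in cl->rate.rate_bytes_ps.

/*
 * Illustration only -- not part of the kernel tree.
 * Build: cc -Wall toy_htb.c && ./a.out
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define TOY_MIN_QUANTUM 1000	/* bytes; below this HTB warns "is small" */
#define TOY_MAX_QUANTUM 200000	/* bytes; above this HTB warns "is big" */

/* (1) quantum defaults to rate / r2q (rate2quantum is forced to >= 1 in
 * htb_init); an explicit quantum from userspace always wins, otherwise
 * the derived value is clamped into [1000, 200000] with a warning. */
static int compute_quantum(uint64_t rate_Bps, unsigned int rate2quantum,
			   int hopt_quantum, uint32_t classid)
{
	uint64_t quantum = rate_Bps / rate2quantum;

	if (quantum > INT_MAX)	/* the newer side clamps via min_t() */
		quantum = INT_MAX;
	if (!hopt_quantum && quantum < TOY_MIN_QUANTUM) {
		fprintf(stderr, "HTB: quantum of class %X is small. Consider r2q change.\n",
			classid);
		quantum = TOY_MIN_QUANTUM;
	}
	if (!hopt_quantum && quantum > TOY_MAX_QUANTUM) {
		fprintf(stderr, "HTB: quantum of class %X is big. Consider r2q change.\n",
			classid);
		quantum = TOY_MAX_QUANTUM;
	}
	if (hopt_quantum)	/* an explicit quantum from userspace wins */
		quantum = (uint64_t)hopt_quantum;
	return (int)quantum;
}

/* (2) simplified stand-in for struct htb_class */
struct toy_class {
	struct toy_class *parent;
	unsigned int children;
	int level;			/* 0 means leaf */
	int64_t tokens, ctokens;	/* current bucket contents */
	int64_t buffer, cbuffer;	/* configured bucket sizes */
};

/* mirrors htb_parent_last_child(): the root never converts, nor does a
 * parent that still has other children */
static bool toy_last_child(const struct toy_class *cl)
{
	return cl->parent && cl->parent->children == 1;
}

static void toy_parent_to_leaf(struct toy_class *parent)
{
	parent->level = 0;			/* inner node is a leaf again */
	parent->tokens = parent->buffer;	/* restart with full buckets */
	parent->ctokens = parent->cbuffer;
	/* the kernel additionally grafts a fresh default qdisc (or
	 * &noop_qdisc), resets t_c to "now" and sets cmode = HTB_CAN_SEND */
}

int main(void)
{
	struct toy_class root  = { .children = 1, .level = 2 };
	struct toy_class inner = { .parent = &root, .children = 1, .level = 1,
				   .buffer = 1600, .cbuffer = 1600 };
	struct toy_class leaf  = { .parent = &inner, .level = 0 };

	/* 100 Mbit/s = 12,500,000 bytes/s with iproute2's default r2q of 10
	 * gives 1,250,000, clamped to 200000 with the "is big" warning */
	printf("quantum = %d\n", compute_quantum(12500000ULL, 10, 0, 0x10001));

	/* deleting the only leaf converts its parent back into a leaf */
	if (toy_last_child(&leaf))
		toy_parent_to_leaf(leaf.parent);
	printf("inner level after delete = %d\n", inner.level);
	return 0;
}

The quantum matters because it is, roughly, the byte allowance a leaf receives per round of HTB's deficit round-robin among same-priority siblings, which is why values derived from very high or very low rates draw the "Consider r2q change" warning.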