// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
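
/* Example (illustrative; device, rates and classids are hypothetical, not
 * part of this file): a two-level hierarchy of the kind described above,
 * built with tc.  Leaves 1:10 and 1:20 are level 0 and may borrow up to
 * their ceil from parent 1:1 when it has spare rate:
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit ceil 100mbit
 */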

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;
	int			filter_cnt;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_basic_sync bstats_bias;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
			struct netdev_queue *offload_queue;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int		overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}

#define HTB_DIRECT ((struct htb_class *)-1L)

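/* Example (illustrative numbers): classids are 32-bit handles with the
 * major number in the top 16 bits and the minor in the low 16 bits, so
 * "1:10" is 0x00010010.  With sch->handle == 0x00010000 ("1:") and
 * defcls == 0x20, the default-class lookup in htb_classify() below is
 * htb_find(TC_H_MAKE(0x00010000, 0x20), sch), i.e. class 1:20.
 */
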
/**
 * htb_classify - classify a packet into class
 * @skb: the socket buffer
 * @sch: the active queue discipline
 * @qerr: pointer for returned status code
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * we finish and return the direct queue.
 */
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
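
/* A hypothetical userspace sketch (not from this file) of the filter-less
 * path above: a socket whose sk_priority equals a valid leaf classid is
 * classified straight into that class; CAP_NET_ADMIN is required to set
 * priorities outside the unprivileged 0..6 range:
 *
 *	int prio = 0x00010010;	// classid "1:10"
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 */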

/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key (nanoseconds). Make sure that class
 * is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}
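
/* Worked example (illustrative numbers): if a class exhausted its rate and
 * htb_class_mode() reported it will be eligible again 3 ms from now
 * (*diff == 3000000 ns), the caller passes that delay here, so
 * cl->pq_key = q->now + 3000000 and the level's near_ev_cache is pulled
 * down to that time if it was later.
 */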

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 * @q: the priority event queue
 * @cl: the class to remove
 * @mask: the given priorities in class in bitmap
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 * @q: the priority event queue
 * @cl: the class to activate
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			unsigned int prio = ffz(~m);

			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
				break;
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 * @q: the priority event queue
 * @cl: the class to deactivate
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

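/* Example of the hysteresis band (illustrative numbers): take a class with
 * a cbuffer worth 10000 ns of tokens and htb_hysteresis enabled.  While it
 * can send, it is demoted to HTB_CANT_SEND only when ctokens drops below
 * -10000 (htb_lowater() == -cbuffer); once it cannot send, ctokens must
 * climb back to 0 (htb_lowater() == 0) before it may send again.  Each
 * threshold is thus widened into a band, cutting the rate of mode flips
 * at some cost in accuracy.
 */
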
/**
 * htb_class_mode - computes and returns current class mode
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 * @q: the priority event queue
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * This should be the only way to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

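/* Example (illustrative): a leaf configured at prio 2 that just received
 * its first packet gets cl->prio_activity = 1 << 2 = 0x4 in htb_activate()
 * above, which links its node[2] into the parent feed (or into the row
 * when it may send on its own), making it visible to the dequeue path.
 */
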
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

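/* Worked example (illustrative numbers) of the accounting above: at
 * rate 1 Mbit/s (125000 bytes/s), psched_l2t_ns() prices a 1500 byte
 * packet at 12000000 ns, so dequeuing it costs 12 ms worth of tokens;
 * the result is then clamped to at most cl->buffer and to no less than
 * roughly -cl->mbuffer.
 */
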
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 * @q: the priority event queue
 * @cl: the class to start iterating from
 * @level: the minimum level to account
 * @skb: the socket buffer
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

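/* Example (illustrative): in a hierarchy root (level 2) <- mid (level 1)
 * <- leaf (level 0), a packet sent by the leaf while borrowing from mid
 * is charged with level == 1: the leaf pays only ceil tokens and counts
 * a borrow, mid pays rate and ceil tokens and counts the lend
 * (cl->level == level), and root pays rate and ceil tokens as well.
 */
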
/**
 * htb_do_events - make mode changes to classes at the level
 * @q: the priority event queue
 * @level: which wait_pq in 'q->hlevel'
 * @start: start jiffies
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 * @hprio: the current one
 * @prio: which prio in class
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
				htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

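/* DRR example (illustrative numbers): at rate 1 Mbit/s with the default
 * rate2quantum of 10, a leaf quantum is 12500 bytes.  Each dequeued packet
 * shrinks leaf.deficit[level]; a 1500 byte packet takes it down by 1500,
 * and once the deficit goes negative the class is granted one more quantum
 * and the round-robin pointer advances, so each active class sends roughly
 * one quantum per round.
 */
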
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

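/* Example (illustrative): if every eligible class is throttled and the
 * earliest pending event is 3 ms away, next_event == q->now + 3000000 and
 * the watchdog hrtimer is armed for that instant; only when event
 * processing overran its 2 jiffies budget (htb_do_events() returned
 * q->now) does the qdisc fall back to rescheduling itself via q->work.
 */
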
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]  = { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static void htb_set_lockdep_class_child(struct Qdisc *q)
{
	static struct lock_class_key child_key;

	lockdep_set_class(qdisc_lock(q), &child_key);
}

static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	unsigned int ntx;
	bool offload;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);

	if (offload) {
		if (sch->parent != TC_H_ROOT) {
			NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
			return -EOPNOTSUPP;
		}

		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
			NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
			return -EOPNOTSUPP;
		}

		q->num_direct_qdiscs = dev->real_num_tx_queues;
		q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
					   sizeof(*q->direct_qdiscs),
					   GFP_KERNEL);
		if (!q->direct_qdiscs)
			return -ENOMEM;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	if (!offload)
		return 0;

	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *qdisc;

		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  TC_H_MAKE(sch->handle, 0), extack);
		if (!qdisc) {
			return -ENOMEM;
		}

		q->direct_qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

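	/* Everything above is plain software setup; this is the first of the
	 * ndo_setup_tc calls, asking the driver to create the root of the
	 * offloaded hierarchy with the default classid.
	 */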
	offload_opt = (struct tc_htb_qopt_offload) {
		.command = TC_HTB_CREATE,
		.parent_classid = TC_H_MAJ(sch->handle) >> 16,
		.classid = TC_H_MIN(q->defcls),
		.extack = extack,
	};
	err = htb_offload(dev, &offload_opt);
	if (err)
		return err;

	/* Defer this assignment, so that htb_destroy skips offload-related
	 * parts (especially calling ndo_setup_tc) on errors.
	 */
	q->offload = true;

	return 0;
}

static void htb_attach_offload(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct htb_sched *q = qdisc_priv(sch);
	unsigned int ntx;

	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
		struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];

		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		qdisc_put(old);
		qdisc_hash_add(qdisc, false);
	}
	for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);

		qdisc_put(old);
	}

	kfree(q->direct_qdiscs);
	q->direct_qdiscs = NULL;
}

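/* The software attach mirrors what qdisc_graft() does for a root qdisc: the
 * same HTB instance is grafted on every TX queue, taking one reference per
 * queue.
 */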
static void htb_attach_software(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	/* Resemble qdisc_graft behavior. */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);

		qdisc_refcount_inc(sch);

		qdisc_put(old);
	}
}

static void htb_attach(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (q->offload)
		htb_attach_offload(sch);
	else
		htb_attach_software(sch);
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	if (q->offload)
		sch->flags |= TCQ_F_OFFLOADED;
	else
		sch->flags &= ~TCQ_F_OFFLOADED;

	sch->qstats.overlimits = q->overlimits;
	/* It's safe not to acquire the qdisc lock: as we hold the RTNL,
	 * no change can happen to the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;
	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

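/* Per-class dump. The legacy tc_htb_opt fields are 32-bit, so rates that
 * need more than 32 bits of bytes per second are also exported through the
 * TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes.
 */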
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe not to acquire the qdisc lock: as we hold the RTNL,
	 * no change can happen to the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->leaf.q)
		tcm->tcm_info = cl->leaf.q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

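/* With offload, byte and packet counts live partly in the driver and partly
 * in the bias counters accumulated when leaves are promoted or deleted; an
 * inner class therefore sums the bias of every descendant plus the counters
 * of the descendants' current leaf qdiscs.
 */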
static void htb_offload_aggregate_stats(struct htb_sched *q,
					struct htb_class *cl)
{
	u64 bytes = 0, packets = 0;
	struct htb_class *c;
	unsigned int i;

	gnet_stats_basic_sync_init(&cl->bstats);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
			struct htb_class *p = c;

			while (p && p->level < cl->level)
				p = p->parent;

			if (p != cl)
				continue;

			bytes += u64_stats_read(&c->bstats_bias.bytes);
			packets += u64_stats_read(&c->bstats_bias.packets);
			if (c->level == 0) {
				bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
				packets += u64_stats_read(&c->leaf.q->bstats.packets);
			}
		}
	}
	_bstats_update(&cl->bstats, bytes, packets);
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
		.overlimits = cl->overlimits,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->leaf.q)
		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);

	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (q->offload) {
		if (!cl->level) {
			if (cl->leaf.q)
				cl->bstats = cl->leaf.q->bstats;
			else
				gnet_stats_basic_sync_init(&cl->bstats);
			_bstats_update(&cl->bstats,
				       u64_stats_read(&cl->bstats_bias.bytes),
				       u64_stats_read(&cl->bstats_bias.packets));
		} else {
			htb_offload_aggregate_stats(q, cl);
		}
	}

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

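/* In offload mode every leaf class owns one device TX queue, so grafting a
 * child qdisc must target that queue; TC_HTB_LEAF_QUERY_QUEUE asks the
 * driver which queue currently backs the parent classid.
 */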
static struct netdev_queue *
htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	int err;

	if (!q->offload)
		return sch->dev_queue;

	offload_opt = (struct tc_htb_qopt_offload) {
		.command = TC_HTB_LEAF_QUERY_QUEUE,
		.classid = TC_H_MIN(tcm->tcm_parent),
	};
	err = htb_offload(dev, &offload_opt);
	if (err || offload_opt.qid >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, offload_opt.qid);
}

static struct Qdisc *
htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
{
	struct net_device *dev = dev_queue->dev;
	struct Qdisc *old_q;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);
	old_q = dev_graft_qdisc(dev_queue, new_q);
	if (new_q)
		new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return old_q;
}

static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
{
	struct netdev_queue *queue;

	queue = cl->leaf.offload_queue;
	if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
		WARN_ON(cl->leaf.q->dev_queue != queue);

	return queue;
}

static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
				   struct htb_class *cl_new, bool destroying)
{
	struct netdev_queue *queue_old, *queue_new;
	struct net_device *dev = qdisc_dev(sch);

	queue_old = htb_offload_get_queue(cl_old);
	queue_new = htb_offload_get_queue(cl_new);

	if (!destroying) {
		struct Qdisc *qdisc;

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);
		qdisc = dev_graft_qdisc(queue_old, NULL);
		WARN_ON(qdisc != cl_old->leaf.q);
	}

	if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
		cl_old->leaf.q->dev_queue = queue_new;
	cl_old->leaf.offload_queue = queue_new;

	if (!destroying) {
		struct Qdisc *qdisc;

		qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
		if (dev->flags & IFF_UP)
			dev_activate(dev);
		WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
	}
}

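/* Only leaf classes may carry an elementary qdisc. In offload mode the
 * replacement is also grafted on the leaf's TX queue, holding one reference
 * for cl->leaf.q and one for dev_queue->qdisc.
 */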
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = sch->dev_queue;
	struct htb_class *cl = (struct htb_class *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc *old_q;

	if (cl->level)
		return -EINVAL;

	if (q->offload)
		dev_queue = htb_offload_get_queue(cl);

	if (!new) {
		new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, extack);
		if (!new)
			return -ENOBUFS;
	}

	if (q->offload) {
		/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
		qdisc_refcount_inc(new);
		old_q = htb_graft_helper(dev_queue, new);
	}

	*old = qdisc_replace(sch, new, &cl->leaf.q);

	if (q->offload) {
		WARN_ON(old_q != *old);
		qdisc_put(old_q);
	}

	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	return !cl->level ? cl->leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	htb_deactivate(qdisc_priv(sch), cl);
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

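/* When the last child of an inner class goes away, the parent collapses
 * back into a leaf: it leaves the wait queue, restarts with a full token
 * bucket and adopts new_q (or noop_qdisc) as its elementary qdisc.
 */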
static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->inner, 0, sizeof(parent->inner));
	parent->leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
	if (q->offload)
		parent->leaf.offload_queue = cl->leaf.offload_queue;
}

static void htb_parent_to_leaf_offload(struct Qdisc *sch,
				       struct netdev_queue *dev_queue,
				       struct Qdisc *new_q)
{
	struct Qdisc *old_q;

	/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
	if (new_q)
		qdisc_refcount_inc(new_q);
	old_q = htb_graft_helper(dev_queue, new_q);
	WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
}

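/* Offloaded leaf teardown uses one of three driver commands: TC_HTB_LEAF_DEL
 * for an ordinary leaf, TC_HTB_LEAF_DEL_LAST when the parent is about to
 * become a leaf again, and the _FORCE variant while the whole qdisc is being
 * destroyed. The driver may respond that it moved another classid onto the
 * freed queue, which is handled at the bottom of this function.
 */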
static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
				     bool last_child, bool destroying,
				     struct netlink_ext_ack *extack)
{
	struct tc_htb_qopt_offload offload_opt;
	struct netdev_queue *dev_queue;
	struct Qdisc *q = cl->leaf.q;
	struct Qdisc *old;
	int err;

	if (cl->level)
		return -EINVAL;

	WARN_ON(!q);
	dev_queue = htb_offload_get_queue(cl);
	/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
	 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
	 * does not need to graft or qdisc_put the qdisc being destroyed.
	 */
	if (!destroying) {
		old = htb_graft_helper(dev_queue, NULL);
		/* Last qdisc grafted should be the same as cl->leaf.q when
		 * calling htb_delete.
		 */
		WARN_ON(old != q);
	}

	if (cl->parent) {
		_bstats_update(&cl->parent->bstats_bias,
			       u64_stats_read(&q->bstats.bytes),
			       u64_stats_read(&q->bstats.packets));
	}

	offload_opt = (struct tc_htb_qopt_offload) {
		.command = !last_child ? TC_HTB_LEAF_DEL :
			   destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
			   TC_HTB_LEAF_DEL_LAST,
		.classid = cl->common.classid,
		.extack = extack,
	};
	err = htb_offload(qdisc_dev(sch), &offload_opt);

	if (!destroying) {
		if (!err)
			qdisc_put(old);
		else
			htb_graft_helper(dev_queue, old);
	}

	if (last_child)
		return err;

	if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
		u32 classid = TC_H_MAJ(sch->handle) |
			      TC_H_MIN(offload_opt.classid);
		struct htb_class *moved_cl = htb_find(classid, sch);

		htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
	}

	return err;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->leaf.q);
		qdisc_put(cl->leaf.q);
	}
	gen_kill_estimator(&cl->rate_est);
	tcf_block_put(cl->block);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	bool nonempty, changed;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below, and
	 * surprisingly it worked in 2.4. But it must precede it, because a
	 * filter needs its target class alive to be able to call
	 * unbind_filter on it (without an oops).
	 */
	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}

	do {
		nonempty = false;
		changed = false;
		for (i = 0; i < q->clhash.hashsize; i++) {
			hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
						  common.hnode) {
				bool last_child;

				if (!q->offload) {
					htb_destroy_class(sch, cl);
					continue;
				}

				nonempty = true;

				if (cl->level)
					continue;

				changed = true;

				last_child = htb_parent_last_child(cl);
				htb_destroy_class_offload(sch, cl, last_child,
							  true, NULL);
				qdisc_class_hash_remove(&q->clhash,
							&cl->common);
				if (cl->parent)
					cl->parent->children--;
				if (last_child)
					htb_parent_to_leaf(sch, cl, NULL);
				htb_destroy_class(sch, cl);
			}
		}
	} while (changed);
	WARN_ON(nonempty);

	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);

	if (q->offload) {
		offload_opt = (struct tc_htb_qopt_offload) {
			.command = TC_HTB_DESTROY,
		};
		htb_offload(dev, &offload_opt);
	}

	if (!q->direct_qdiscs)
		return;
	for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
		qdisc_put(q->direct_qdiscs[i]);
	kfree(q->direct_qdiscs);
}

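/* A class can only be deleted when it has no children and is not in use;
 * deleting the last child of a parent converts that parent back to a leaf.
 */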
static int htb_delete(struct Qdisc *sch, unsigned long arg,
		      struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;
	int err;

	/* TODO: why is deleting a whole subtree not allowed? Is it because of
	 * references? Does the tc subsystem guarantee that htb_destroy holds
	 * no class refs, so that children can be removed safely there?
	 */
	if (cl->children || qdisc_class_in_use(&cl->common)) {
		NL_SET_ERR_MSG(extack, "HTB class in use");
		return -EBUSY;
	}

	if (!cl->level && htb_parent_last_child(cl))
		last_child = 1;

	if (q->offload) {
		err = htb_destroy_class_offload(sch, cl, last_child, false,
						extack);
		if (err)
			return err;
	}

	if (last_child) {
		struct netdev_queue *dev_queue = sch->dev_queue;

		if (q->offload)
			dev_queue = htb_offload_get_queue(cl);

		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid,
					  NULL);
		if (q->offload)
			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
	}

	sch_tree_lock(sch);

	if (!cl->level)
		qdisc_purge_queue(cl->leaf.q);

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(sch, cl, new_q);

	sch_tree_unlock(sch);

	htb_destroy_class(sch, cl);
	return 0;
}

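/* Implements both "tc class add" and "tc class change". An illustrative
 * user-space sequence (device name and numbers are examples only):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 30mbit ceil 100mbit
 *
 * Each class operation arrives here with TCA_HTB_PARMS filled in, plus
 * TCA_HTB_RATE64/TCA_HTB_CEIL64 when the rate does not fit in 32 bits.
 */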
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct tc_htb_qopt_offload offload_opt;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct Qdisc *parent_qdisc = NULL;
	struct netdev_queue *dev_queue;
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;
	int warn = 0;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  extack);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	if (q->offload) {
		/* Options not supported by the offload. */
		if (hopt->rate.overhead || hopt->ceil.overhead) {
			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
			goto failure;
		}
		if (hopt->rate.mpu || hopt->ceil.mpu) {
			NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
			goto failure;
		}
	}

	/* Keeping backward compatible with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
					      NULL));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
					      NULL));

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	if (!cl) {		/* new class */
		struct net_device *dev = qdisc_dev(sch);
		struct Qdisc *new_q, *old_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		gnet_stats_basic_sync_init(&cl->bstats);
		gnet_stats_basic_sync_init(&cl->bstats_bias);

		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						true,
						tca[TCA_RATE] ? : &est.nla);
			if (err)
				goto err_block_put;
		}

		cl->children = 0;
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		cl->common.classid = classid;

		/* Make sure nothing interrupts us in between two
		 * ndo_setup_tc calls.
		 */
		ASSERT_RTNL();

		/* create the leaf qdisc early, because it uses
		 * kmalloc(GFP_KERNEL) and so can't be created inside
		 * sch_tree_lock -- thanks to Karlis Peisenieks
		 */
		if (!q->offload) {
			dev_queue = sch->dev_queue;
		} else if (!(parent && !parent->level)) {
			/* Assign a dev_queue to this classid. */
			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_LEAF_ALLOC_QUEUE,
				.classid = cl->common.classid,
				.parent_classid = parent ?
					TC_H_MIN(parent->common.classid) :
					TC_HTB_CLASSID_ROOT,
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.prio = hopt->prio,
				.quantum = hopt->quantum,
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err) {
				NL_SET_ERR_MSG_WEAK(extack,
						    "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
				goto err_kill_estimator;
			}
			dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
		} else { /* First child. */
			dev_queue = htb_offload_get_queue(parent);
			old_q = htb_graft_helper(dev_queue, NULL);
			WARN_ON(old_q != parent->leaf.q);
			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_LEAF_TO_INNER,
				.classid = cl->common.classid,
				.parent_classid =
					TC_H_MIN(parent->common.classid),
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.prio = hopt->prio,
				.quantum = hopt->quantum,
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err) {
				NL_SET_ERR_MSG_WEAK(extack,
						    "Failed to offload TC_HTB_LEAF_TO_INNER");
				htb_graft_helper(dev_queue, old_q);
				goto err_kill_estimator;
			}
			_bstats_update(&parent->bstats_bias,
				       u64_stats_read(&old_q->bstats.bytes),
				       u64_stats_read(&old_q->bstats.packets));
			qdisc_put(old_q);
		}
		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  classid, NULL);
		if (q->offload) {
			/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
			if (new_q)
				qdisc_refcount_inc(new_q);
			old_q = htb_graft_helper(dev_queue, new_q);
			/* No qdisc_put needed. */
			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
		}
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			qdisc_purge_queue(parent->leaf.q);
			parent_qdisc = parent->leaf.q;
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->inner, 0, sizeof(parent->inner));
		}

		/* leaf (we) needs elementary qdisc */
		cl->leaf.q = new_q ? new_q : &noop_qdisc;
		if (q->offload)
			cl->leaf.offload_queue = dev_queue;

		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		if (q->offload) {
			struct net_device *dev = qdisc_dev(sch);

			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_NODE_MODIFY,
				.classid = cl->common.classid,
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.prio = hopt->prio,
				.quantum = hopt->quantum,
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err)
				/* Estimator was replaced, and rollback may fail
				 * as well, so we don't try to recover it, and
				 * the estimator won't work properly with the
				 * offload anyway, because bstats are updated
				 * only when the stats are queried.
				 */
				return err;
		}

		sch_tree_lock(sch);
	}

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

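	/* If the user did not supply a quantum, derive the DRR quantum from
	 * the rate: quantum = rate_bytes_ps / r2q. Illustrative numbers, not
	 * from the source: at rate 12mbit (1,500,000 bytes/s) and the usual
	 * iproute2 default of r2q = 10, the quantum becomes 150,000 bytes,
	 * inside the [1000, 200000] range enforced below.
	 */
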
	/* A nasty bug used to live here: we have to check that the node
	 * is really a leaf before changing cl->leaf!
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			warn = -1;
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			warn = 1;
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);
	qdisc_put(parent_qdisc);

	if (warn)
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "quantum of class %X is %s. Consider r2q change.",
				       cl->common.classid, (warn == -1 ? "small" : "big"));

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

err_kill_estimator:
	gen_kill_estimator(&cl->rate_est);
err_block_put:
	tcf_block_put(cl->block);
	kfree(cl);
failure:
	return err;
}

static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get the
	 * class for other reasons, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained, it is OK - bind_filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by the class during destroy, IIUC.
	 */
	if (cl)
		qdisc_class_get(&cl->common);
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	qdisc_class_put(&cl->common);
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.select_queue	=	htb_select_queue,
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.find		=	htb_search,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_block	=	htb_tcf_block,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	htb_init,
	.attach		=	htb_attach,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("htb");

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");