~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/sched/sch_htb.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /net/sched/sch_htb.c (Version linux-6.11.5) and /net/sched/sch_htb.c (Version linux-6.10.4)


** Warning: Cannot open xref database.

  1 // SPDX-License-Identifier: GPL-2.0-or-later        1 
  2 /*                                                
  3  * net/sched/sch_htb.c  Hierarchical token buc    
  4  *                                                
  5  * Authors:     Martin Devera, <devik@cdi.cz>     
  6  *                                                
  7  * Credits (in time order) for older HTB versi    
  8  *              Stef Coene <stef.coene@docum.o    
  9  *                      HTB support at LARTC m    
 10  *              Ondrej Kraus, <krauso@barr.cz>    
 11  *                      found missing INIT_QDI    
 12  *              Vladimir Smelhaus, Aamer Akhte    
 13  *                      helped a lot to locate    
 14  *              Andi Kleen, Jamal Hadi, Bert H    
 15  *                      code review and helpfu    
 16  *              Tomasz Wrona, <tw@eter.tym.pl>    
 17  *                      created test case so t    
 18  *              Wilfried Weissmann                
 19  *                      spotted bug in dequeue    
 20  *              Jiri Fojtasek                     
 21  *                      fixed requeue routine     
 22  *              and many others. thanks.          
 23  */                                               
 24 #include <linux/module.h>                         
 25 #include <linux/moduleparam.h>                    
 26 #include <linux/types.h>                          
 27 #include <linux/kernel.h>                         
 28 #include <linux/string.h>                         
 29 #include <linux/errno.h>                          
 30 #include <linux/skbuff.h>                         
 31 #include <linux/list.h>                           
 32 #include <linux/compiler.h>                       
 33 #include <linux/rbtree.h>                         
 34 #include <linux/workqueue.h>                      
 35 #include <linux/slab.h>                           
 36 #include <net/netlink.h>                          
 37 #include <net/sch_generic.h>                      
 38 #include <net/pkt_sched.h>                        
 39 #include <net/pkt_cls.h>                          
 40                                                   
 41 /* HTB algorithm.                                 
 42     Author: devik@cdi.cz                          
 43     ==========================================    
 44     HTB is like TBF with multiple classes. It     
 45     it allows to assign priority to each class    
 46     In fact it is another implementation of Fl    
 47                                                   
 48     Levels:                                       
 49     Each class is assigned level. Leaf has ALW    
 50     classes have level TC_HTB_MAXDEPTH-1. Inte    
 51     one less than their parent.                   
 52 */                                                
 53                                                   
 54 static int htb_hysteresis __read_mostly = 0; /    
 55 #define HTB_VER 0x30011         /* major must     
 56                                                   
 57 #if HTB_VER >> 16 != TC_HTB_PROTOVER              
 58 #error "Mismatched sch_htb.c and pkt_sch.h"       
 59 #endif                                            
 60                                                   
 61 /* Module parameter and sysfs export */           
 62 module_param    (htb_hysteresis, int, 0640);      
 63 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis m    
 64                                                   
 65 static int htb_rate_est = 0; /* htb classes ha    
 66 module_param(htb_rate_est, int, 0640);            
 67 MODULE_PARM_DESC(htb_rate_est, "setup a defaul    
 68                                                   
  69 /* used internally to keep status of single cla
 70 enum htb_cmode {                                  
 71         HTB_CANT_SEND,          /* class can't    
 72         HTB_MAY_BORROW,         /* class can't    
 73         HTB_CAN_SEND            /* class can s    
 74 };                                                
 75                                                   
 76 struct htb_prio {                                 
 77         union {                                   
 78                 struct rb_root  row;              
 79                 struct rb_root  feed;             
 80         };                                        
 81         struct rb_node  *ptr;                     
 82         /* When class changes from state 1->2     
 83          * parent's feed then we lost ptr valu    
 84          * first child again. Here we store cl    
 85          * last valid ptr (used when ptr is NU    
 86          */                                       
 87         u32             last_ptr_id;              
 88 };                                                
 89                                                   
 90 /* interior & leaf nodes; props specific to le    
 91  * To reduce false sharing, place mostly read     
 92  * and mostly written ones at the end.            
 93  */                                               
 94 struct htb_class {                                
 95         struct Qdisc_class_common common;         
 96         struct psched_ratecfg   rate;             
 97         struct psched_ratecfg   ceil;             
 98         s64                     buffer, cbuffe    
 99         s64                     mbuffer;          
100         u32                     prio;             
101         int                     quantum;          
102                                                   
103         struct tcf_proto __rcu  *filter_list;     
104         struct tcf_block        *block;           
105                                                   
106         int                     level;            
107         unsigned int            children;         
108         struct htb_class        *parent;          
109                                                   
110         struct net_rate_estimator __rcu *rate_    
111                                                   
112         /*                                        
113          * Written often fields                   
114          */                                       
115         struct gnet_stats_basic_sync bstats;      
116         struct gnet_stats_basic_sync bstats_bi    
117         struct tc_htb_xstats    xstats; /* our    
118                                                   
119         /* token bucket parameters */             
120         s64                     tokens, ctoken    
121         s64                     t_c;              
122                                                   
123         union {                                   
124                 struct htb_class_leaf {           
125                         int             defici    
126                         struct Qdisc    *q;       
127                         struct netdev_queue *o    
128                 } leaf;                           
129                 struct htb_class_inner {          
130                         struct htb_prio clprio    
131                 } inner;                          
132         };                                        
133         s64                     pq_key;           
134                                                   
135         int                     prio_activity;    
136         enum htb_cmode          cmode;            
137         struct rb_node          pq_node;          
138         struct rb_node          node[TC_HTB_NU    
139                                                   
140         unsigned int drops ____cacheline_align    
141         unsigned int            overlimits;       
142 };                                                
143                                                   
144 struct htb_level {                                
145         struct rb_root  wait_pq;                  
146         struct htb_prio hprio[TC_HTB_NUMPRIO];    
147 };                                                
148                                                   
149 struct htb_sched {                                
150         struct Qdisc_class_hash clhash;           
151         int                     defcls;           
152         int                     rate2quantum;     
153                                                   
154         /* filters for qdisc itself */            
155         struct tcf_proto __rcu  *filter_list;     
156         struct tcf_block        *block;           
157                                                   
158 #define HTB_WARN_TOOMANYEVENTS  0x1               
159         unsigned int            warned; /* onl    
160         int                     direct_qlen;      
161         struct work_struct      work;             
162                                                   
163         /* non shaped skbs; let them go direct    
164         struct qdisc_skb_head   direct_queue;     
165         u32                     direct_pkts;      
166         u32                     overlimits;       
167                                                   
168         struct qdisc_watchdog   watchdog;         
169                                                   
170         s64                     now;    /* cac    
171                                                   
172         /* time of nearest event per level (ro    
173         s64                     near_ev_cache[    
174                                                   
175         int                     row_mask[TC_HT    
176                                                   
177         struct htb_level        hlevel[TC_HTB_    
178                                                   
179         struct Qdisc            **direct_qdisc    
180         unsigned int            num_direct_qdi    
181                                                   
182         bool                    offload;          
183 };                                                
184                                                   
185 /* find class in global hash table using given    
186 static inline struct htb_class *htb_find(u32 h    
187 {                                                 
188         struct htb_sched *q = qdisc_priv(sch);    
189         struct Qdisc_class_common *clc;           
190                                                   
191         clc = qdisc_class_find(&q->clhash, han    
192         if (clc == NULL)                          
193                 return NULL;                      
194         return container_of(clc, struct htb_cl    
195 }                                                 
196                                                   
197 static unsigned long htb_search(struct Qdisc *    
198 {                                                 
199         return (unsigned long)htb_find(handle,    
200 }                                                 
201                                                   
202 #define HTB_DIRECT ((struct htb_class *)-1L)      
203                                                   
204 /**                                               
205  * htb_classify - classify a packet into class    
206  * @skb: the socket buffer                        
207  * @sch: the active queue discipline              
208  * @qerr: pointer for returned status code        
209  *                                                
210  * It returns NULL if the packet should be dro    
 211  * should be passed directly through. In all othe
212  * We allow direct class selection by classid     
213  * filters in qdisc and in inner nodes (if hig    
214  * node). If we end up with classid MAJOR:0 we    
215  * internal fifo (direct). These packets then     
216  * have no valid leaf we try to use MAJOR:defa    
217  * then finish and return direct queue.           
218  */                                               
219 static struct htb_class *htb_classify(struct s    
220                                       int *qer    
221 {                                                 
222         struct htb_sched *q = qdisc_priv(sch);    
223         struct htb_class *cl;                     
224         struct tcf_result res;                    
225         struct tcf_proto *tcf;                    
226         int result;                               
227                                                   
228         /* allow to select class by setting sk    
229          * note that nfmark can be used too by    
230          * rules in it                            
231          */                                       
232         if (skb->priority == sch->handle)         
233                 return HTB_DIRECT;      /* X:0    
234         cl = htb_find(skb->priority, sch);        
235         if (cl) {                                 
236                 if (cl->level == 0)               
237                         return cl;                
238                 /* Start with inner filter cha    
239                 tcf = rcu_dereference_bh(cl->f    
240         } else {                                  
241                 tcf = rcu_dereference_bh(q->fi    
242         }                                         
243                                                   
244         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_    
245         while (tcf && (result = tcf_classify(s    
246 #ifdef CONFIG_NET_CLS_ACT                         
247                 switch (result) {                 
248                 case TC_ACT_QUEUED:               
249                 case TC_ACT_STOLEN:               
250                 case TC_ACT_TRAP:                 
251                         *qerr = NET_XMIT_SUCCE    
252                         fallthrough;              
253                 case TC_ACT_SHOT:                 
254                         return NULL;              
255                 }                                 
256 #endif                                            
257                 cl = (void *)res.class;           
258                 if (!cl) {                        
259                         if (res.classid == sch    
260                                 return HTB_DIR    
261                         cl = htb_find(res.clas    
262                         if (!cl)                  
263                                 break;  /* fil    
264                 }                                 
265                 if (!cl->level)                   
266                         return cl;      /* we     
267                                                   
268                 /* we have got inner class; ap    
269                 tcf = rcu_dereference_bh(cl->f    
270         }                                         
271         /* classification failed; try to use d    
272         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->    
273         if (!cl || cl->level)                     
274                 return HTB_DIRECT;      /* bad    
275         return cl;                                
276 }                                                 
277                                                   
278 /**                                               
279  * htb_add_to_id_tree - adds class to the roun    
280  * @root: the root of the tree                    
281  * @cl: the class to add                          
282  * @prio: the give prio in class                  
283  *                                                
284  * Routine adds class to the list (actually tr    
285  * Make sure that class is not already on such    
286  */                                               
287 static void htb_add_to_id_tree(struct rb_root     
288                                struct htb_clas    
289 {                                                 
290         struct rb_node **p = &root->rb_node, *    
291                                                   
292         while (*p) {                              
293                 struct htb_class *c;              
294                 parent = *p;                      
295                 c = rb_entry(parent, struct ht    
296                                                   
297                 if (cl->common.classid > c->co    
298                         p = &parent->rb_right;    
299                 else                              
300                         p = &parent->rb_left;     
301         }                                         
302         rb_link_node(&cl->node[prio], parent,     
303         rb_insert_color(&cl->node[prio], root)    
304 }                                                 
305                                                   
306 /**                                               
307  * htb_add_to_wait_tree - adds class to the ev    
308  * @q: the priority event queue                   
309  * @cl: the class to add                          
310  * @delay: delay in microseconds                  
311  *                                                
312  * The class is added to priority event queue     
313  * change its mode in cl->pq_key microseconds.    
314  * already in the queue.                          
315  */                                               
316 static void htb_add_to_wait_tree(struct htb_sc    
317                                  struct htb_cl    
318 {                                                 
319         struct rb_node **p = &q->hlevel[cl->le    
320                                                   
321         cl->pq_key = q->now + delay;              
322         if (cl->pq_key == q->now)                 
323                 cl->pq_key++;                     
324                                                   
325         /* update the nearest event cache */      
326         if (q->near_ev_cache[cl->level] > cl->    
327                 q->near_ev_cache[cl->level] =     
328                                                   
329         while (*p) {                              
330                 struct htb_class *c;              
331                 parent = *p;                      
332                 c = rb_entry(parent, struct ht    
333                 if (cl->pq_key >= c->pq_key)      
334                         p = &parent->rb_right;    
335                 else                              
336                         p = &parent->rb_left;     
337         }                                         
338         rb_link_node(&cl->pq_node, parent, p);    
339         rb_insert_color(&cl->pq_node, &q->hlev    
340 }                                                 
341                                                   
342 /**                                               
343  * htb_next_rb_node - finds next node in binar    
344  * @n: the current node in binary tree            
345  *                                                
346  * When we are past last key we return NULL.      
347  * Average complexity is 2 steps per call.        
348  */                                               
349 static inline void htb_next_rb_node(struct rb_    
350 {                                                 
351         *n = rb_next(*n);                         
352 }                                                 
353                                                   
354 /**                                               
355  * htb_add_class_to_row - add class to its row    
356  * @q: the priority event queue                   
357  * @cl: the class to add                          
358  * @mask: the given priorities in class in bit    
359  *                                                
360  * The class is added to row at priorities mar    
361  * It does nothing if mask == 0.                  
362  */                                               
363 static inline void htb_add_class_to_row(struct    
364                                         struct    
365 {                                                 
366         q->row_mask[cl->level] |= mask;           
367         while (mask) {                            
368                 int prio = ffz(~mask);            
369                 mask &= ~(1 << prio);             
370                 htb_add_to_id_tree(&q->hlevel[    
371         }                                         
372 }                                                 
373                                                   
374 /* If this triggers, it is a bug in this code,    
375 static void htb_safe_rb_erase(struct rb_node *    
376 {                                                 
377         if (RB_EMPTY_NODE(rb)) {                  
378                 WARN_ON(1);                       
379         } else {                                  
380                 rb_erase(rb, root);               
381                 RB_CLEAR_NODE(rb);                
382         }                                         
383 }                                                 
384                                                   
385                                                   
386 /**                                               
387  * htb_remove_class_from_row - removes class f    
388  * @q: the priority event queue                   
389  * @cl: the class to add                          
390  * @mask: the given priorities in class in bit    
391  *                                                
392  * The class is removed from row at priorities    
393  * It does nothing if mask == 0.                  
394  */                                               
395 static inline void htb_remove_class_from_row(s    
396                                                   
397 {                                                 
398         int m = 0;                                
399         struct htb_level *hlevel = &q->hlevel[    
400                                                   
401         while (mask) {                            
402                 int prio = ffz(~mask);            
403                 struct htb_prio *hprio = &hlev    
404                                                   
405                 mask &= ~(1 << prio);             
406                 if (hprio->ptr == cl->node + p    
407                         htb_next_rb_node(&hpri    
408                                                   
409                 htb_safe_rb_erase(cl->node + p    
410                 if (!hprio->row.rb_node)          
411                         m |= 1 << prio;           
412         }                                         
413         q->row_mask[cl->level] &= ~m;             
414 }                                                 
415                                                   
416 /**                                               
 417  * htb_activate_prios - creates active class'
418  * @q: the priority event queue                   
419  * @cl: the class to activate                     
420  *                                                
421  * The class is connected to ancestors and/or     
422  * for priorities it is participating on. cl->    
423  * (activated) mode. It does nothing if cl->pr    
424  */                                               
425 static void htb_activate_prios(struct htb_sche    
426 {                                                 
427         struct htb_class *p = cl->parent;         
428         long m, mask = cl->prio_activity;         
429                                                   
430         while (cl->cmode == HTB_MAY_BORROW &&     
431                 m = mask;                         
432                 while (m) {                       
433                         unsigned int prio = ff    
434                                                   
435                         if (WARN_ON_ONCE(prio     
436                                 break;            
437                         m &= ~(1 << prio);        
438                                                   
439                         if (p->inner.clprio[pr    
440                                 /* parent alre    
441                                  * reset bit i    
442                                  */               
443                                 mask &= ~(1 <<    
444                                                   
445                         htb_add_to_id_tree(&p-    
446                 }                                 
447                 p->prio_activity |= mask;         
448                 cl = p;                           
449                 p = cl->parent;                   
450                                                   
451         }                                         
452         if (cl->cmode == HTB_CAN_SEND && mask)    
453                 htb_add_class_to_row(q, cl, ma    
454 }                                                 
455                                                   
456 /**                                               
457  * htb_deactivate_prios - remove class from fe    
458  * @q: the priority event queue                   
459  * @cl: the class to deactivate                   
460  *                                                
461  * cl->cmode must represent old mode (before d    
462  * nothing if cl->prio_activity == 0. Class is    
463  * chains and rows.                               
464  */                                               
465 static void htb_deactivate_prios(struct htb_sc    
466 {                                                 
467         struct htb_class *p = cl->parent;         
468         long m, mask = cl->prio_activity;         
469                                                   
470         while (cl->cmode == HTB_MAY_BORROW &&     
471                 m = mask;                         
472                 mask = 0;                         
473                 while (m) {                       
474                         int prio = ffz(~m);       
475                         m &= ~(1 << prio);        
476                                                   
477                         if (p->inner.clprio[pr    
478                                 /* we are remo    
479                                  * parent feed    
480                                  * classid        
481                                  */               
482                                 p->inner.clpri    
483                                 p->inner.clpri    
484                         }                         
485                                                   
486                         htb_safe_rb_erase(cl->    
487                                           &p->    
488                                                   
489                         if (!p->inner.clprio[p    
490                                 mask |= 1 << p    
491                 }                                 
492                                                   
493                 p->prio_activity &= ~mask;        
494                 cl = p;                           
495                 p = cl->parent;                   
496                                                   
497         }                                         
498         if (cl->cmode == HTB_CAN_SEND && mask)    
499                 htb_remove_class_from_row(q, c    
500 }                                                 
501                                                   
502 static inline s64 htb_lowater(const struct htb    
503 {                                                 
504         if (htb_hysteresis)                       
505                 return cl->cmode != HTB_CANT_S    
506         else                                      
507                 return 0;                         
508 }                                                 
509 static inline s64 htb_hiwater(const struct htb    
510 {                                                 
511         if (htb_hysteresis)                       
512                 return cl->cmode == HTB_CAN_SE    
513         else                                      
514                 return 0;                         
515 }                                                 
516                                                   
517                                                   
518 /**                                               
519  * htb_class_mode - computes and returns curre    
520  * @cl: the target class                          
521  * @diff: diff time in microseconds               
522  *                                                
523  * It computes cl's mode at time cl->t_c+diff     
524  * is not HTB_CAN_SEND then cl->pq_key is upda    
525  * from now to time when cl will change its st    
526  * Also it is worth to note that class mode do    
527  * at cl->{c,}tokens == 0 but there can rather    
528  * 0 .. -cl->{c,}buffer range. It is meant to     
529  * mode transitions per time unit. The speed g    
530  */                                               
531 static inline enum htb_cmode                      
532 htb_class_mode(struct htb_class *cl, s64 *diff    
533 {                                                 
534         s64 toks;                                 
535                                                   
536         if ((toks = (cl->ctokens + *diff)) < h    
537                 *diff = -toks;                    
538                 return HTB_CANT_SEND;             
539         }                                         
540                                                   
541         if ((toks = (cl->tokens + *diff)) >= h    
542                 return HTB_CAN_SEND;              
543                                                   
544         *diff = -toks;                            
545         return HTB_MAY_BORROW;                    
546 }                                                 
547                                                   
548 /**                                               
 549  * htb_change_class_mode - changes class's mo
550  * @q: the priority event queue                   
551  * @cl: the target class                          
552  * @diff: diff time in microseconds               
553  *                                                
554  * This should be the only way how to change c    
555  * circumstances. Routine will update feed lis    
556  * and add class to the wait event queue if ap    
557  * be different from old one and cl->pq_key ha    
558  * to mode other than HTB_CAN_SEND (see htb_ad    
559  */                                               
560 static void                                       
561 htb_change_class_mode(struct htb_sched *q, str    
562 {                                                 
563         enum htb_cmode new_mode = htb_class_mo    
564                                                   
565         if (new_mode == cl->cmode)                
566                 return;                           
567                                                   
568         if (new_mode == HTB_CANT_SEND) {          
569                 cl->overlimits++;                 
570                 q->overlimits++;                  
571         }                                         
572                                                   
573         if (cl->prio_activity) {        /* not    
574                 if (cl->cmode != HTB_CANT_SEND    
575                         htb_deactivate_prios(q    
576                 cl->cmode = new_mode;             
577                 if (new_mode != HTB_CANT_SEND)    
578                         htb_activate_prios(q,     
579         } else                                    
580                 cl->cmode = new_mode;             
581 }                                                 
582                                                   
583 /**                                               
584  * htb_activate - inserts leaf cl into appropr    
585  * @q: the priority event queue                   
586  * @cl: the target class                          
587  *                                                
588  * Routine learns (new) priority of leaf and a    
589  * for the prio. It can be called on already a    
590  * It also adds leaf into droplist.               
591  */                                               
592 static inline void htb_activate(struct htb_sch    
593 {                                                 
594         WARN_ON(cl->level || !cl->leaf.q || !c    
595                                                   
596         if (!cl->prio_activity) {                 
597                 cl->prio_activity = 1 << cl->p    
598                 htb_activate_prios(q, cl);        
599         }                                         
600 }                                                 
601                                                   
602 /**                                               
603  * htb_deactivate - remove leaf cl from active    
604  * @q: the priority event queue                   
605  * @cl: the target class                          
606  *                                                
607  * Make sure that leaf is active. In the other    
608  * with non-active leaf. It also removes class    
609  */                                               
610 static inline void htb_deactivate(struct htb_s    
611 {                                                 
612         WARN_ON(!cl->prio_activity);              
613                                                   
614         htb_deactivate_prios(q, cl);              
615         cl->prio_activity = 0;                    
616 }                                                 
617                                                   
618 static int htb_enqueue(struct sk_buff *skb, st    
619                        struct sk_buff **to_fre    
620 {                                                 
621         int ret;                                  
622         unsigned int len = qdisc_pkt_len(skb);    
623         struct htb_sched *q = qdisc_priv(sch);    
624         struct htb_class *cl = htb_classify(sk    
625                                                   
626         if (cl == HTB_DIRECT) {                   
627                 /* enqueue to helper queue */     
628                 if (q->direct_queue.qlen < q->    
629                         __qdisc_enqueue_tail(s    
630                         q->direct_pkts++;         
631                 } else {                          
632                         return qdisc_drop(skb,    
633                 }                                 
634 #ifdef CONFIG_NET_CLS_ACT                         
635         } else if (!cl) {                         
636                 if (ret & __NET_XMIT_BYPASS)      
637                         qdisc_qstats_drop(sch)    
638                 __qdisc_drop(skb, to_free);       
639                 return ret;                       
640 #endif                                            
641         } else if ((ret = qdisc_enqueue(skb, c    
642                                         to_fre    
643                 if (net_xmit_drop_count(ret))     
644                         qdisc_qstats_drop(sch)    
645                         cl->drops++;              
646                 }                                 
647                 return ret;                       
648         } else {                                  
649                 htb_activate(q, cl);              
650         }                                         
651                                                   
652         sch->qstats.backlog += len;               
653         sch->q.qlen++;                            
654         return NET_XMIT_SUCCESS;                  
655 }                                                 
656                                                   
657 static inline void htb_accnt_tokens(struct htb    
658 {                                                 
659         s64 toks = diff + cl->tokens;             
660                                                   
661         if (toks > cl->buffer)                    
662                 toks = cl->buffer;                
663         toks -= (s64) psched_l2t_ns(&cl->rate,    
664         if (toks <= -cl->mbuffer)                 
665                 toks = 1 - cl->mbuffer;           
666                                                   
667         cl->tokens = toks;                        
668 }                                                 
669                                                   
670 static inline void htb_accnt_ctokens(struct ht    
671 {                                                 
672         s64 toks = diff + cl->ctokens;            
673                                                   
674         if (toks > cl->cbuffer)                   
675                 toks = cl->cbuffer;               
676         toks -= (s64) psched_l2t_ns(&cl->ceil,    
677         if (toks <= -cl->mbuffer)                 
678                 toks = 1 - cl->mbuffer;           
679                                                   
680         cl->ctokens = toks;                       
681 }                                                 
682                                                   
683 /**                                               
684  * htb_charge_class - charges amount "bytes" t    
685  * @q: the priority event queue                   
686  * @cl: the class to start iterate                
687  * @level: the minimum level to account           
688  * @skb: the socket buffer                        
689  *                                                
690  * Routine assumes that packet "bytes" long wa    
691  * borrowing from "level". It accounts bytes t    
692  * leaf and all ancestors and to rate bucket f    
693  * "level" and higher. It also handles possibl    
694  * from the update. Note that mode can also in    
695  * CAN_SEND) because we can use more precise c    
696  * In such case we remove class from event que    
697  */                                               
698 static void htb_charge_class(struct htb_sched     
699                              int level, struct    
700 {                                                 
701         int bytes = qdisc_pkt_len(skb);           
702         enum htb_cmode old_mode;                  
703         s64 diff;                                 
704                                                   
705         while (cl) {                              
706                 diff = min_t(s64, q->now - cl-    
707                 if (cl->level >= level) {         
708                         if (cl->level == level    
709                                 cl->xstats.len    
710                         htb_accnt_tokens(cl, b    
711                 } else {                          
712                         cl->xstats.borrows++;     
713                         cl->tokens += diff;       
714                 }                                 
715                 htb_accnt_ctokens(cl, bytes, d    
716                 cl->t_c = q->now;                 
717                                                   
718                 old_mode = cl->cmode;             
719                 diff = 0;                         
720                 htb_change_class_mode(q, cl, &    
721                 if (old_mode != cl->cmode) {      
722                         if (old_mode != HTB_CA    
723                                 htb_safe_rb_er    
724                         if (cl->cmode != HTB_C    
725                                 htb_add_to_wai    
726                 }                                 
727                                                   
728                 /* update basic stats except f    
729                 if (cl->level)                    
730                         bstats_update(&cl->bst    
731                                                   
732                 cl = cl->parent;                  
733         }                                         
734 }                                                 
735                                                   
736 /**                                               
737  * htb_do_events - make mode changes to classe    
738  * @q: the priority event queue                   
739  * @level: which wait_pq in 'q->hlevel'           
740  * @start: start jiffies                          
741  *                                                
742  * Scans event queue for pending events and ap    
743  * next pending event (0 for no event in pq, q    
744  * Note: Applied are events whose have cl->pq_    
745  */                                               
746 static s64 htb_do_events(struct htb_sched *q,     
747                          unsigned long start)     
748 {                                                 
749         /* don't run for longer than 2 jiffies    
750          * 1 to simplify things when jiffy is     
751          * too soon                               
752          */                                       
753         unsigned long stop_at = start + 2;        
754         struct rb_root *wait_pq = &q->hlevel[l    
755                                                   
756         while (time_before(jiffies, stop_at))     
757                 struct htb_class *cl;             
758                 s64 diff;                         
759                 struct rb_node *p = rb_first(w    
760                                                   
761                 if (!p)                           
762                         return 0;                 
763                                                   
764                 cl = rb_entry(p, struct htb_cl    
765                 if (cl->pq_key > q->now)          
766                         return cl->pq_key;        
767                                                   
768                 htb_safe_rb_erase(p, wait_pq);    
769                 diff = min_t(s64, q->now - cl-    
770                 htb_change_class_mode(q, cl, &    
771                 if (cl->cmode != HTB_CAN_SEND)    
772                         htb_add_to_wait_tree(q    
773         }                                         
774                                                   
775         /* too much load - let's continue afte    
776         if (!(q->warned & HTB_WARN_TOOMANYEVEN    
777                 pr_warn("htb: too many events!    
778                 q->warned |= HTB_WARN_TOOMANYE    
779         }                                         
780                                                   
781         return q->now;                            
782 }                                                 
783                                                   
784 /* Returns class->node+prio from id-tree where    
785  * is no such one exists.                         
786  */                                               
787 static struct rb_node *htb_id_find_next_upper(    
788                                                   
789 {                                                 
790         struct rb_node *r = NULL;                 
791         while (n) {                               
792                 struct htb_class *cl =            
793                     rb_entry(n, struct htb_cla    
794                                                   
795                 if (id > cl->common.classid) {    
796                         n = n->rb_right;          
797                 } else if (id < cl->common.cla    
798                         r = n;                    
799                         n = n->rb_left;           
800                 } else {                          
801                         return n;                 
802                 }                                 
803         }                                         
804         return r;                                 
805 }                                                 
806                                                   
807 /**                                               
808  * htb_lookup_leaf - returns next leaf class i    
809  * @hprio: the current one                        
810  * @prio: which prio in class                     
811  *                                                
812  * Find leaf where current feed pointers point    
813  */                                               
814 static struct htb_class *htb_lookup_leaf(struc    
815 {                                                 
816         int i;                                    
817         struct {                                  
818                 struct rb_node *root;             
819                 struct rb_node **pptr;            
820                 u32 *pid;                         
821         } stk[TC_HTB_MAXDEPTH], *sp = stk;        
822                                                   
823         BUG_ON(!hprio->row.rb_node);              
824         sp->root = hprio->row.rb_node;            
825         sp->pptr = &hprio->ptr;                   
826         sp->pid = &hprio->last_ptr_id;            
827                                                   
828         for (i = 0; i < 65535; i++) {             
829                 if (!*sp->pptr && *sp->pid) {     
830                         /* ptr was invalidated    
831                          * the original or nex    
832                          */                       
833                         *sp->pptr =               
834                             htb_id_find_next_u    
835                 }                                 
836                 *sp->pid = 0;   /* ptr is vali    
837                                  * can become     
838                                  */               
839                 if (!*sp->pptr) {       /* we     
840                         *sp->pptr = sp->root;     
841                         while ((*sp->pptr)->rb    
842                                 *sp->pptr = (*    
843                         if (sp > stk) {           
844                                 sp--;             
845                                 if (!*sp->pptr    
846                                         WARN_O    
847                                         return    
848                                 }                 
849                                 htb_next_rb_no    
850                         }                         
851                 } else {                          
852                         struct htb_class *cl;     
853                         struct htb_prio *clp;     
854                                                   
855                         cl = rb_entry(*sp->ppt    
856                         if (!cl->level)           
857                                 return cl;        
858                         clp = &cl->inner.clpri    
859                         (++sp)->root = clp->fe    
860                         sp->pptr = &clp->ptr;     
861                         sp->pid = &clp->last_p    
862                 }                                 
863         }                                         
864         WARN_ON(1);                               
865         return NULL;                              
866 }                                                 
867                                                   
868 /* dequeues packet at given priority and level    
869  * you are sure that there is active class at     
870  */                                               
871 static struct sk_buff *htb_dequeue_tree(struct    
872                                         const     
873 {                                                 
874         struct sk_buff *skb = NULL;               
875         struct htb_class *cl, *start;             
876         struct htb_level *hlevel = &q->hlevel[    
877         struct htb_prio *hprio = &hlevel->hpri    
878                                                   
879         /* look initial class up in the row */    
880         start = cl = htb_lookup_leaf(hprio, pr    
881                                                   
882         do {                                      
883 next:                                             
884                 if (unlikely(!cl))                
885                         return NULL;              
886                                                   
887                 /* class can be empty - it is     
888                  * qdisc drops packets in enqu    
889                  * graft operation on the leaf    
890                  * simply deactivate and skip     
891                  */                               
892                 if (unlikely(cl->leaf.q->q.qle    
893                         struct htb_class *next    
894                         htb_deactivate(q, cl);    
895                                                   
896                         /* row/level might bec    
897                         if ((q->row_mask[level    
898                                 return NULL;      
899                                                   
900                         next = htb_lookup_leaf    
901                                                   
902                         if (cl == start)          
903                                 start = next;     
904                         cl = next;                
905                         goto next;                
906                 }                                 
907                                                   
908                 skb = cl->leaf.q->dequeue(cl->    
909                 if (likely(skb != NULL))          
910                         break;                    
911                                                   
912                 qdisc_warn_nonwc("htb", cl->le    
913                 htb_next_rb_node(level ? &cl->    
914                                          &q->h    
915                 cl = htb_lookup_leaf(hprio, pr    
916                                                   
917         } while (cl != start);                    
918                                                   
919         if (likely(skb != NULL)) {                
920                 bstats_update(&cl->bstats, skb    
921                 cl->leaf.deficit[level] -= qdi    
922                 if (cl->leaf.deficit[level] <     
923                         cl->leaf.deficit[level    
924                         htb_next_rb_node(level    
925                                                   
926                 }                                 
927                 /* this used to be after charg    
928                  * gives us slightly better pe    
929                  */                               
930                 if (!cl->leaf.q->q.qlen)          
931                         htb_deactivate(q, cl);    
932                 htb_charge_class(q, cl, level,    
933         }                                         
934         return skb;                               
935 }                                                 
936                                                   
937 static struct sk_buff *htb_dequeue(struct Qdis    
938 {                                                 
939         struct sk_buff *skb;                      
940         struct htb_sched *q = qdisc_priv(sch);    
941         int level;                                
942         s64 next_event;                           
943         unsigned long start_at;                   
944                                                   
945         /* try to dequeue direct packets as hi    
946         skb = __qdisc_dequeue_head(&q->direct_    
947         if (skb != NULL) {                        
948 ok:                                               
949                 qdisc_bstats_update(sch, skb);    
950                 qdisc_qstats_backlog_dec(sch,     
951                 sch->q.qlen--;                    
952                 return skb;                       
953         }                                         
954                                                   
955         if (!sch->q.qlen)                         
956                 goto fin;                         
957         q->now = ktime_get_ns();                  
958         start_at = jiffies;                       
959                                                   
960         next_event = q->now + 5LLU * NSEC_PER_    
961                                                   
962         for (level = 0; level < TC_HTB_MAXDEPT    
963                 /* common case optimization -     
964                 int m;                            
965                 s64 event = q->near_ev_cache[l    
966                                                   
967                 if (q->now >= event) {            
968                         event = htb_do_events(    
969                         if (!event)               
970                                 event = q->now    
971                         q->near_ev_cache[level    
972                 }                                 
973                                                   
974                 if (next_event > event)           
975                         next_event = event;       
976                                                   
977                 m = ~q->row_mask[level];          
978                 while (m != (int)(-1)) {          
979                         int prio = ffz(m);        
980                                                   
981                         m |= 1 << prio;           
982                         skb = htb_dequeue_tree    
983                         if (likely(skb != NULL    
984                                 goto ok;          
985                 }                                 
986         }                                         
987         if (likely(next_event > q->now))          
988                 qdisc_watchdog_schedule_ns(&q-    
989         else                                      
990                 schedule_work(&q->work);          
991 fin:                                              
992         return skb;                               
993 }                                                 
994                                                   
995 /* reset all classes */                           
996 /* always caled under BH & queue lock */          
997 static void htb_reset(struct Qdisc *sch)          
998 {                                                 
999         struct htb_sched *q = qdisc_priv(sch);    
1000         struct htb_class *cl;                    
1001         unsigned int i;                          
1002                                                  
1003         for (i = 0; i < q->clhash.hashsize; i    
1004                 hlist_for_each_entry(cl, &q->    
1005                         if (cl->level)           
1006                                 memset(&cl->i    
1007                         else {                   
1008                                 if (cl->leaf.    
1009                                         qdisc    
1010                         }                        
1011                         cl->prio_activity = 0    
1012                         cl->cmode = HTB_CAN_S    
1013                 }                                
1014         }                                        
1015         qdisc_watchdog_cancel(&q->watchdog);     
1016         __qdisc_reset_queue(&q->direct_queue)    
1017         memset(q->hlevel, 0, sizeof(q->hlevel    
1018         memset(q->row_mask, 0, sizeof(q->row_    
1019 }                                                
1020                                                  
1021 static const struct nla_policy htb_policy[TCA    
1022         [TCA_HTB_PARMS] = { .len = sizeof(str    
1023         [TCA_HTB_INIT]  = { .len = sizeof(str    
1024         [TCA_HTB_CTAB]  = { .type = NLA_BINAR    
1025         [TCA_HTB_RTAB]  = { .type = NLA_BINAR    
1026         [TCA_HTB_DIRECT_QLEN] = { .type = NLA    
1027         [TCA_HTB_RATE64] = { .type = NLA_U64     
1028         [TCA_HTB_CEIL64] = { .type = NLA_U64     
1029         [TCA_HTB_OFFLOAD] = { .type = NLA_FLA    
1030 };                                               
1031                                                  
1032 static void htb_work_func(struct work_struct     
1033 {                                                
1034         struct htb_sched *q = container_of(wo    
1035         struct Qdisc *sch = q->watchdog.qdisc    
1036                                                  
1037         rcu_read_lock();                         
1038         __netif_schedule(qdisc_root(sch));       
1039         rcu_read_unlock();                       
1040 }                                                
1041                                                  
1042 static int htb_offload(struct net_device *dev    
1043 {                                                
1044         return dev->netdev_ops->ndo_setup_tc(    
1045 }                                                
1046                                                  
1047 static int htb_init(struct Qdisc *sch, struct    
1048                     struct netlink_ext_ack *e    
1049 {                                                
1050         struct net_device *dev = qdisc_dev(sc    
1051         struct tc_htb_qopt_offload offload_op    
1052         struct htb_sched *q = qdisc_priv(sch)    
1053         struct nlattr *tb[TCA_HTB_MAX + 1];      
1054         struct tc_htb_glob *gopt;                
1055         unsigned int ntx;                        
1056         bool offload;                            
1057         int err;                                 
1058                                                  
1059         qdisc_watchdog_init(&q->watchdog, sch    
1060         INIT_WORK(&q->work, htb_work_func);      
1061                                                  
1062         if (!opt)                                
1063                 return -EINVAL;                  
1064                                                  
1065         err = tcf_block_get(&q->block, &q->fi    
1066         if (err)                                 
1067                 return err;                      
1068                                                  
1069         err = nla_parse_nested_deprecated(tb,    
1070                                           NUL    
1071         if (err < 0)                             
1072                 return err;                      
1073                                                  
1074         if (!tb[TCA_HTB_INIT])                   
1075                 return -EINVAL;                  
1076                                                  
1077         gopt = nla_data(tb[TCA_HTB_INIT]);       
1078         if (gopt->version != HTB_VER >> 16)      
1079                 return -EINVAL;                  
1080                                                  
1081         offload = nla_get_flag(tb[TCA_HTB_OFF    
1082                                                  
1083         if (offload) {                           
1084                 if (sch->parent != TC_H_ROOT)    
1085                         NL_SET_ERR_MSG(extack    
1086                         return -EOPNOTSUPP;      
1087                 }                                
1088                                                  
1089                 if (!tc_can_offload(dev) || !    
1090                         NL_SET_ERR_MSG(extack    
1091                         return -EOPNOTSUPP;      
1092                 }                                
1093                                                  
1094                 q->num_direct_qdiscs = dev->r    
1095                 q->direct_qdiscs = kcalloc(q-    
1096                                            si    
1097                                            GF    
1098                 if (!q->direct_qdiscs)           
1099                         return -ENOMEM;          
1100         }                                        
1101                                                  
1102         err = qdisc_class_hash_init(&q->clhas    
1103         if (err < 0)                             
1104                 return err;                      
1105                                                  
1106         if (tb[TCA_HTB_DIRECT_QLEN])             
1107                 q->direct_qlen = nla_get_u32(    
1108         else                                     
1109                 q->direct_qlen = qdisc_dev(sc    
1110                                                  
1111         if ((q->rate2quantum = gopt->rate2qua    
1112                 q->rate2quantum = 1;             
1113         q->defcls = gopt->defcls;                
1114                                                  
1115         if (!offload)                            
1116                 return 0;                        
1117                                                  
1118         for (ntx = 0; ntx < q->num_direct_qdi    
1119                 struct netdev_queue *dev_queu    
1120                 struct Qdisc *qdisc;             
1121                                                  
1122                 qdisc = qdisc_create_dflt(dev    
1123                                           TC_    
1124                 if (!qdisc) {                    
1125                         return -ENOMEM;          
1126                 }                                
1127                                                  
1128                 q->direct_qdiscs[ntx] = qdisc    
1129                 qdisc->flags |= TCQ_F_ONETXQU    
1130         }                                        
1131                                                  
1132         sch->flags |= TCQ_F_MQROOT;              
1133                                                  
1134         offload_opt = (struct tc_htb_qopt_off    
1135                 .command = TC_HTB_CREATE,        
1136                 .parent_classid = TC_H_MAJ(sc    
1137                 .classid = TC_H_MIN(q->defcls    
1138                 .extack = extack,                
1139         };                                       
1140         err = htb_offload(dev, &offload_opt);    
1141         if (err)                                 
1142                 return err;                      
1143                                                  
1144         /* Defer this assignment, so that htb    
1145          * parts (especially calling ndo_setu    
1146          */                                      
1147         q->offload = true;                       
1148                                                  
1149         return 0;                                
1150 }                                                
1151                                                  
/* Attach path for offloaded HTB: graft each pre-created direct qdisc
 * (q->direct_qdiscs[ntx]) onto its tx queue, dropping whatever was
 * attached before, then release the qdiscs on any remaining tx queues
 * and free the temporary direct_qdiscs array.
 * NOTE(review): lines are right-truncated in this view; loop bounds and
 * some arguments are partially hidden — verify against full source.
 */
1152 static void htb_attach_offload(struct Qdisc *
1153 {
1154         struct net_device *dev = qdisc_dev(sc
1155         struct htb_sched *q = qdisc_priv(sch)
1156         unsigned int ntx;
1157 
             /* Graft each direct qdisc; put the displaced one and make
              * the new one visible to the qdisc hash for dumps.
              */
1158         for (ntx = 0; ntx < q->num_direct_qdi
1159                 struct Qdisc *old, *qdisc = q
1160 
1161                 old = dev_graft_qdisc(qdisc->
1162                 qdisc_put(old);
1163                 qdisc_hash_add(qdisc, false);
1164         }
             /* Release qdiscs on the tx queues beyond num_direct_qdiscs. */
1165         for (ntx = q->num_direct_qdiscs; ntx 
1166                 struct netdev_queue *dev_queu
1167                 struct Qdisc *old = dev_graft
1168 
1169                 qdisc_put(old);
1170         }
1171 
             /* Array ownership ends here; NULL it so htb_destroy skips it. */
1172         kfree(q->direct_qdiscs);
1173         q->direct_qdiscs = NULL;
1174 }
1175                                                  
/* Attach path for non-offloaded HTB: install this qdisc (sch) on every
 * tx queue of the device, taking one reference per queue and releasing
 * each previously attached qdisc — mirroring what qdisc_graft does.
 * NOTE(review): lines are right-truncated in this view.
 */
1176 static void htb_attach_software(struct Qdisc 
1177 {
1178         struct net_device *dev = qdisc_dev(sc
1179         unsigned int ntx;
1180 
1181         /* Resemble qdisc_graft behavior. */
1182         for (ntx = 0; ntx < dev->num_tx_queue
1183                 struct netdev_queue *dev_queu
1184                 struct Qdisc *old = dev_graft
1185 
                     /* One ref per tx queue that now points at sch. */
1186                 qdisc_refcount_inc(sch);
1187 
1188                 qdisc_put(old);
1189         }
1190 }
1191                                                  
/* Dispatch to the offload or software attach routine depending on
 * whether hardware offload was enabled at init time (q->offload).
 */
1192 static void htb_attach(struct Qdisc *sch)
1193 {
1194         struct htb_sched *q = qdisc_priv(sch)
1195 
1196         if (q->offload)
1197                 htb_attach_offload(sch);
1198         else
1199                 htb_attach_software(sch);
1200 }
1201                                                  
/* Dump the qdisc-level HTB options (struct tc_htb_glob plus the direct
 * queue length, and an offload flag when active) into a nested netlink
 * attribute.  Returns the nest size on success, -1 on nla_put failure.
 * NOTE(review): attribute names are partially truncated in this view.
 */
1202 static int htb_dump(struct Qdisc *sch, struct
1203 {
1204         struct htb_sched *q = qdisc_priv(sch)
1205         struct nlattr *nest;
1206         struct tc_htb_glob gopt;
1207 
             /* Reflect offload state in the qdisc flags so userspace sees it. */
1208         if (q->offload)
1209                 sch->flags |= TCQ_F_OFFLOADED
1210         else
1211                 sch->flags &= ~TCQ_F_OFFLOADE
1212 
1213         sch->qstats.overlimits = q->overlimit
1214         /* Its safe to not acquire qdisc lock
1215          * no change can happen on the qdisc 
1216          */
1217 
1218         gopt.direct_pkts = q->direct_pkts;
1219         gopt.version = HTB_VER;
1220         gopt.rate2quantum = q->rate2quantum;
1221         gopt.defcls = q->defcls;
1222         gopt.debug = 0;
1223 
1224         nest = nla_nest_start_noflag(skb, TCA
1225         if (nest == NULL)
1226                 goto nla_put_failure;
1227         if (nla_put(skb, TCA_HTB_INIT, sizeof
1228             nla_put_u32(skb, TCA_HTB_DIRECT_Q
1229                 goto nla_put_failure;
             /* Offload is reported as a flag attribute only when enabled. */
1230         if (q->offload && nla_put_flag(skb, T
1231                 goto nla_put_failure;
1232 
1233         return nla_nest_end(skb, nest);
1234 
1235 nla_put_failure:
1236         nla_nest_cancel(skb, nest);
1237         return -1;
1238 }
1239                                                  
/* Dump one class: fills tcm handle/parent/info and a nested attribute
 * containing struct tc_htb_opt (rate, ceil, buffers, quantum, prio,
 * level) plus 64-bit rate attributes when the 32-bit fields overflow.
 * Returns the nest size on success, -1 on failure.
 * NOTE(review): several argument lists are truncated in this view.
 */
1240 static int htb_dump_class(struct Qdisc *sch, 
1241                           struct sk_buff *skb
1242 {
1243         struct htb_class *cl = (struct htb_cl
1244         struct htb_sched *q = qdisc_priv(sch)
1245         struct nlattr *nest;
1246         struct tc_htb_opt opt;
1247 
1248         /* Its safe to not acquire qdisc lock
1249          * no change can happen on the class 
1250          */
1251         tcm->tcm_parent = cl->parent ? cl->pa
1252         tcm->tcm_handle = cl->common.classid;
             /* Leaf classes also report their attached qdisc's handle. */
1253         if (!cl->level && cl->leaf.q)
1254                 tcm->tcm_info = cl->leaf.q->h
1255 
1256         nest = nla_nest_start_noflag(skb, TCA
1257         if (nest == NULL)
1258                 goto nla_put_failure;
1259 
1260         memset(&opt, 0, sizeof(opt));
1261 
1262         psched_ratecfg_getrate(&opt.rate, &cl
             /* Buffers are stored in ns internally; convert to ticks for ABI. */
1263         opt.buffer = PSCHED_NS2TICKS(cl->buff
1264         psched_ratecfg_getrate(&opt.ceil, &cl
1265         opt.cbuffer = PSCHED_NS2TICKS(cl->cbu
1266         opt.quantum = cl->quantum;
1267         opt.prio = cl->prio;
1268         opt.level = cl->level;
1269         if (nla_put(skb, TCA_HTB_PARMS, sizeo
1270                 goto nla_put_failure;
1271         if (q->offload && nla_put_flag(skb, T
1272                 goto nla_put_failure;
             /* 64-bit rate/ceil attrs — presumably emitted when the rate
              * exceeds what the legacy 32-bit field can represent; the
              * threshold expression is truncated here — verify.
              */
1273         if ((cl->rate.rate_bytes_ps >= (1ULL 
1274             nla_put_u64_64bit(skb, TCA_HTB_RA
1275                               TCA_HTB_PAD))
1276                 goto nla_put_failure;
1277         if ((cl->ceil.rate_bytes_ps >= (1ULL 
1278             nla_put_u64_64bit(skb, TCA_HTB_CE
1279                               TCA_HTB_PAD))
1280                 goto nla_put_failure;
1281 
1282         return nla_nest_end(skb, nest);
1283 
1284 nla_put_failure:
1285         nla_nest_cancel(skb, nest);
1286         return -1;
1287 }
1288                                                  
/* For an inner class in offload mode, rebuild cl->bstats by summing the
 * byte/packet counters of every class whose ancestor chain (walked up
 * to cl->level) reaches cl; leaf classes contribute an extra counter
 * pair (truncated here — presumably their offload-side stats, verify).
 */
1289 static void htb_offload_aggregate_stats(struc
1290                                         struc
1291 {
1292         u64 bytes = 0, packets = 0;
1293         struct htb_class *c;
1294         unsigned int i;
1295 
             /* Start from zero; totals are folded in at the end. */
1296         gnet_stats_basic_sync_init(&cl->bstat
1297 
1298         for (i = 0; i < q->clhash.hashsize; i
1299                 hlist_for_each_entry(c, &q->c
1300                         struct htb_class *p =
1301 
                             /* Climb ancestors until reaching cl's level. */
1302                         while (p && p->level 
1303                                 p = p->parent
1304 
                             /* Skip classes not in cl's subtree. */
1305                         if (p != cl)
1306                                 continue;
1307 
1308                         bytes += u64_stats_re
1309                         packets += u64_stats_
1310                         if (c->level == 0) {
1311                                 bytes += u64_
1312                                 packets += u6
1313                         }
1314                 }
1315         }
1316         _bstats_update(&cl->bstats, bytes, pa
1317 }
1318                                                  
/* Dump per-class statistics: queue stats (qlen/backlog for leaves),
 * token/ctoken xstats clamped to s32 range, and — in offload mode —
 * bstats taken from the leaf qdisc or aggregated over the subtree.
 * Returns the gnet_stats_copy_app result, or -1 on copy failure.
 */
1319 static int
1320 htb_dump_class_stats(struct Qdisc *sch, unsig
1321 {
1322         struct htb_class *cl = (struct htb_cl
1323         struct htb_sched *q = qdisc_priv(sch)
1324         struct gnet_stats_queue qs = {
1325                 .drops = cl->drops,
1326                 .overlimits = cl->overlimits,
1327         };
1328         __u32 qlen = 0;
1329 
             /* Only leaves have a child qdisc to pull qlen/backlog from. */
1330         if (!cl->level && cl->leaf.q)
1331                 qdisc_qstats_qlen_backlog(cl-
1332 
             /* Tokens are kept as s64 ns internally; clamp into the s32
              * range the legacy xstats ABI expects.
              */
1333         cl->xstats.tokens = clamp_t(s64, PSCH
1334                                     INT_MIN, 
1335         cl->xstats.ctokens = clamp_t(s64, PSC
1336                                      INT_MIN,
1337 
1338         if (q->offload) {
1339                 if (!cl->level) {
                             /* Leaf: take stats from the child qdisc if any,
                              * else reset, then add the class's own counters.
                              */
1340                         if (cl->leaf.q)
1341                                 cl->bstats = 
1342                         else
1343                                 gnet_stats_ba
1344                         _bstats_update(&cl->b
1345                                        u64_st
1346                                        u64_st
1347                 } else {
                             /* Inner: sum stats over the whole subtree. */
1348                         htb_offload_aggregate
1349                 }
1350         }
1351 
1352         if (gnet_stats_copy_basic(d, NULL, &c
1353             gnet_stats_copy_rate_est(d, &cl->
1354             gnet_stats_copy_queue(d, NULL, &q
1355                 return -1;
1356 
1357         return gnet_stats_copy_app(d, &cl->xs
1358 }
1359                                                  
/* Pick the tx queue for a class operation.  Without offload the qdisc's
 * own queue is used; with offload the driver is asked (via the
 * TC_HTB_LEAF_QUERY_... command) which queue id backs the classid, and
 * NULL is returned if the query fails or yields an out-of-range qid.
 */
1360 static struct netdev_queue *
1361 htb_select_queue(struct Qdisc *sch, struct tc
1362 {
1363         struct net_device *dev = qdisc_dev(sc
1364         struct tc_htb_qopt_offload offload_op
1365         struct htb_sched *q = qdisc_priv(sch)
1366         int err;
1367 
1368         if (!q->offload)
1369                 return sch->dev_queue;
1370 
1371         offload_opt = (struct tc_htb_qopt_off
1372                 .command = TC_HTB_LEAF_QUERY_
1373                 .classid = TC_H_MIN(tcm->tcm_
1374         };
1375         err = htb_offload(dev, &offload_opt);
             /* Guard against both driver error and a bogus queue id. */
1376         if (err || offload_opt.qid >= dev->nu
1377                 return NULL;
1378         return netdev_get_tx_queue(dev, offlo
1379 }
1380                                                  
/* Swap the qdisc attached to dev_queue for new_q (which may be NULL),
 * deactivating the device around the graft when it is up.  Marks the
 * new qdisc TCQ_F_ONETXQU... (truncated — presumably ONETXQUEUE) and
 * returns the previously attached qdisc; caller owns the old reference.
 */
1381 static struct Qdisc *
1382 htb_graft_helper(struct netdev_queue *dev_que
1383 {
1384         struct net_device *dev = dev_queue->d
1385         struct Qdisc *old_q;
1386 
             /* Quiesce traffic while swapping the queue's qdisc. */
1387         if (dev->flags & IFF_UP)
1388                 dev_deactivate(dev);
1389         old_q = dev_graft_qdisc(dev_queue, ne
1390         if (new_q)
1391                 new_q->flags |= TCQ_F_ONETXQU
1392         if (dev->flags & IFF_UP)
1393                 dev_activate(dev);
1394 
1395         return old_q;
1396 }
1397                                                  
/* Return the tx queue recorded for an offloaded leaf class
 * (cl->leaf.offload_queue), warning if a non-builtin child qdisc
 * disagrees about its dev_queue (consistency check, truncated here).
 */
1398 static struct netdev_queue *htb_offload_get_q
1399 {
1400         struct netdev_queue *queue;
1401 
1402         queue = cl->leaf.offload_queue;
1403         if (!(cl->leaf.q->flags & TCQ_F_BUILT
1404                 WARN_ON(cl->leaf.q->dev_queue
1405 
1406         return queue;
1407 }
1408                                                  
/* Move cl_old's leaf qdisc from its current tx queue to cl_new's queue
 * after the driver re-mapped classids to queues.  When not destroying,
 * the qdisc is detached from the old queue and re-grafted on the new
 * one with the device deactivated across the move; bookkeeping
 * (dev_queue pointer, offload_queue) is updated in both cases.
 * NOTE(review): argument lists and WARN conditions are truncated here.
 */
1409 static void htb_offload_move_qdisc(struct Qdi
1410                                    struct htb
1411 {
1412         struct netdev_queue *queue_old, *queu
1413         struct net_device *dev = qdisc_dev(sc
1414 
1415         queue_old = htb_offload_get_queue(cl_
1416         queue_new = htb_offload_get_queue(cl_
1417 
1418         if (!destroying) {
1419                 struct Qdisc *qdisc;
1420 
1421                 if (dev->flags & IFF_UP)
1422                         dev_deactivate(dev);
                     /* Detach from the old queue; it must be cl_old's leaf. */
1423                 qdisc = dev_graft_qdisc(queue
1424                 WARN_ON(qdisc != cl_old->leaf
1425         }
1426 
             /* Keep the qdisc's own queue pointer in sync unless builtin. */
1427         if (!(cl_old->leaf.q->flags & TCQ_F_B
1428                 cl_old->leaf.q->dev_queue = q
1429         cl_old->leaf.offload_queue = queue_ne
1430 
1431         if (!destroying) {
1432                 struct Qdisc *qdisc;
1433 
1434                 qdisc = dev_graft_qdisc(queue
1435                 if (dev->flags & IFF_UP)
1436                         dev_activate(dev);
1437                 WARN_ON(!(qdisc->flags & TCQ_
1438         }
1439 }
1440                                                  
/* Qdisc_class_ops.graft: replace a leaf class's child qdisc with "new"
 * (creating a default one when new is NULL).  Inner classes cannot hold
 * a qdisc (-EINVAL).  In offload mode an extra reference is taken so
 * the qdisc can be grafted both on the class and on its tx queue.
 */
1441 static int htb_graft(struct Qdisc *sch, unsig
1442                      struct Qdisc **old, stru
1443 {
1444         struct netdev_queue *dev_queue = sch-
1445         struct htb_class *cl = (struct htb_cl
1446         struct htb_sched *q = qdisc_priv(sch)
1447         struct Qdisc *old_q;
1448 
             /* Only leaf classes carry a child qdisc. */
1449         if (cl->level)
1450                 return -EINVAL;
1451 
1452         if (q->offload)
1453                 dev_queue = htb_offload_get_q
1454 
1455         if (!new) {
1456                 new = qdisc_create_dflt(dev_q
1457                                         cl->c
1458                 if (!new)
1459                         return -ENOBUFS;
1460         }
1461 
1462         if (q->offload) {
1463                 /* One ref for cl->leaf.q, th
1464                 qdisc_refcount_inc(new);
1465                 old_q = htb_graft_helper(dev_
1466         }
1467 
1468         *old = qdisc_replace(sch, new, &cl->l
1469 
1470         if (q->offload) {
                     /* The queue and the class must have agreed on the old
                      * qdisc; drop the extra reference now held via old_q.
                      */
1471                 WARN_ON(old_q != *old);
1472                 qdisc_put(old_q);
1473         }
1474 
1475         return 0;
1476 }
1477                                                  
/* Qdisc_class_ops.leaf: return the child qdisc of a leaf class, or
 * NULL for an inner class (which has no attached qdisc).
 */
1478 static struct Qdisc *htb_leaf(struct Qdisc *s
1479 {
1480         struct htb_class *cl = (struct htb_cl
1481         return !cl->level ? cl->leaf.q : NULL
1482 }
1483                                                  
/* Qdisc_class_ops.qlen_notify: a child qdisc became empty — remove the
 * class from the active (self-feed) lists so it is no longer scheduled.
 */
1484 static void htb_qlen_notify(struct Qdisc *sch
1485 {
1486         struct htb_class *cl = (struct htb_cl
1487 
1488         htb_deactivate(qdisc_priv(sch), cl);
1489 }
1490                                                  
/* Return 1 when cl is the sole child of its parent (so deleting it
 * would turn the parent back into a leaf); 0 for the root class or
 * when the parent has other children.
 */
1491 static inline int htb_parent_last_child(struc
1492 {
1493         if (!cl->parent)
1494                 /* the root class */
1495                 return 0;
1496         if (cl->parent->children > 1)
1497                 /* not the last child */
1498                 return 0;
1499         return 1;
1500 }
1501                                                  
/* Convert cl's parent back into a leaf class after its last child (cl)
 * is deleted: remove it from the wait (pq) tree if throttled, clear the
 * inner-node state, attach new_q (or the noop qdisc), refill tokens and
 * reset the cmode to HTB_CAN_SEND.  Caller holds the qdisc tree lock.
 */
1502 static void htb_parent_to_leaf(struct Qdisc *
1503                                struct Qdisc *
1504 {
1505         struct htb_sched *q = qdisc_priv(sch)
1506         struct htb_class *parent = cl->parent
1507 
             /* Sanity: cl must itself be a childless leaf with a qdisc. */
1508         WARN_ON(cl->level || !cl->leaf.q || c
1509 
             /* A throttled parent sits in the event (pq) rbtree; unlink it. */
1510         if (parent->cmode != HTB_CAN_SEND)
1511                 htb_safe_rb_erase(&parent->pq
1512                                   &q->hlevel[
1513 
1514         parent->level = 0;
1515         memset(&parent->inner, 0, sizeof(pare
             /* Fall back to the noop qdisc when no replacement was created. */
1516         parent->leaf.q = new_q ? new_q : &noo
1517         parent->tokens = parent->buffer;
1518         parent->ctokens = parent->cbuffer;
1519         parent->t_c = ktime_get_ns();
1520         parent->cmode = HTB_CAN_SEND;
1521         if (q->offload)
1522                 parent->leaf.offload_queue = 
1523 }
1524                                                  
/* Offload-side half of parent-to-leaf conversion: graft new_q onto the
 * tx queue (taking an extra reference so both the class and the queue
 * hold one) and warn unless the displaced qdisc was the builtin.
 * NOTE(review): the WARN condition is truncated — verify its polarity.
 */
1525 static void htb_parent_to_leaf_offload(struct
1526                                        struct
1527                                        struct
1528 {
1529         struct Qdisc *old_q;
1530 
1531         /* One ref for cl->leaf.q, the other 
1532         if (new_q)
1533                 qdisc_refcount_inc(new_q);
1534         old_q = htb_graft_helper(dev_queue, n
1535         WARN_ON(!(old_q->flags & TCQ_F_BUILTI
1536 }
1537                                                  
/* Tell the driver a leaf class is going away.  Chooses the offload
 * command from (last_child, destroying), detaches the leaf qdisc from
 * its queue unless the whole qdisc is being destroyed (qdisc_graft
 * already handled that), folds the leaf's byte/packet counters into the
 * parent, and — when the driver moved another classid onto the freed
 * queue — relocates that class's qdisc via htb_offload_move_qdisc.
 * Returns the driver's error code; -EINVAL for inner classes.
 * NOTE(review): command names and several conditions are truncated.
 */
1538 static int htb_destroy_class_offload(struct Q
1539                                      bool las
1540                                      struct n
1541 {
1542         struct tc_htb_qopt_offload offload_op
1543         struct netdev_queue *dev_queue;
1544         struct Qdisc *q = cl->leaf.q;
1545         struct Qdisc *old;
1546         int err;
1547 
1548         if (cl->level)
1549                 return -EINVAL;
1550 
1551         WARN_ON(!q);
1552         dev_queue = htb_offload_get_queue(cl)
1553         /* When destroying, caller qdisc_graf
1554          * qdisc_put for the qdisc being dest
1555          * does not need to graft or qdisc_pu
1556          */
1557         if (!destroying) {
1558                 old = htb_graft_helper(dev_qu
1559                 /* Last qdisc grafted should 
1560                  * calling htb_delete.
1561                  */
1562                 WARN_ON(old != q);
1563         }
1564 
             /* Preserve the leaf's counters by rolling them into the parent. */
1565         if (cl->parent) {
1566                 _bstats_update(&cl->parent->b
1567                                u64_stats_read
1568                                u64_stats_read
1569         }
1570 
1571         offload_opt = (struct tc_htb_qopt_off
1572                 .command = !last_child ? TC_H
1573                            destroying ? TC_HT
1574                            TC_HTB_LEAF_DEL_LA
1575                 .classid = cl->common.classid
1576                 .extack = extack,
1577         };
1578         err = htb_offload(qdisc_dev(sch), &of
1579 
             /* On driver failure, re-graft the old qdisc to undo the detach. */
1580         if (!destroying) {
1581                 if (!err)
1582                         qdisc_put(old);
1583                 else
1584                         htb_graft_helper(dev_
1585         }
1586 
1587         if (last_child)
1588                 return err;
1589 
             /* The driver may report that a different classid was moved to
              * this queue; relocate that class's qdisc accordingly.
              */
1590         if (!err && offload_opt.classid != TC
1591                 u32 classid = TC_H_MAJ(sch->h
1592                               TC_H_MIN(offloa
1593                 struct htb_class *moved_cl = 
1594 
1595                 htb_offload_move_qdisc(sch, m
1596         }
1597 
1598         return err;
1599 }
1600                                                  
/* Free one class: drop the leaf qdisc reference (leaves must have one),
 * kill its rate estimator, release its filter block, and free it.
 */
1601 static void htb_destroy_class(struct Qdisc *s
1602 {
1603         if (!cl->level) {
1604                 WARN_ON(!cl->leaf.q);
1605                 qdisc_put(cl->leaf.q);
1606         }
1607         gen_kill_estimator(&cl->rate_est);
1608         tcf_block_put(cl->block);
1609         kfree(cl);
1610 }
1611                                                  
/* Tear down the whole qdisc: cancel deferred work and the watchdog,
 * release all filter blocks first (filters need their target classes
 * alive for unbinding), then destroy classes — leaves before inners in
 * offload mode, iterating until no level-0 class remains — and finally
 * drop the hash, the direct queue, the driver-side state and any
 * remaining pre-created direct qdiscs.
 * NOTE(review): loop headers and some calls are truncated in this view.
 */
1612 static void htb_destroy(struct Qdisc *sch)
1613 {
1614         struct net_device *dev = qdisc_dev(sc
1615         struct tc_htb_qopt_offload offload_op
1616         struct htb_sched *q = qdisc_priv(sch)
1617         struct hlist_node *next;
1618         bool nonempty, changed;
1619         struct htb_class *cl;
1620         unsigned int i;
1621 
1622         cancel_work_sync(&q->work);
1623         qdisc_watchdog_cancel(&q->watchdog);
1624         /* This line used to be after htb_des
1625          * and surprisingly it worked in 2.4.
1626          * because filter need its target cla
1627          * unbind_filter on it (without Oops)
1628          */
1629         tcf_block_put(q->block);
1630 
             /* Release every class's filter block before any class dies. */
1631         for (i = 0; i < q->clhash.hashsize; i
1632                 hlist_for_each_entry(cl, &q->
1633                         tcf_block_put(cl->blo
1634                         cl->block = NULL;
1635                 }
1636         }
1637 
             /* Repeated passes: each pass removes current leaves, which may
              * turn inner classes into leaves for the next pass (offload
              * requires bottom-up destruction); non-offload destroys
              * everything in a single sweep.
              */
1638         do {
1639                 nonempty = false;
1640                 changed = false;
1641                 for (i = 0; i < q->clhash.has
1642                         hlist_for_each_entry_
1643 
1644                                 bool last_chi
1645 
1646                                 if (!q->offlo
1647                                         htb_d
1648                                         conti
1649                                 }
1650 
1651                                 nonempty = tr
1652 
1653                                 if (cl->level
1654                                         conti
1655 
1656                                 changed = tru
1657 
1658                                 last_child = 
1659                                 htb_destroy_c
1660 
1661                                 qdisc_class_h
1662 
1663                                 if (cl->paren
1664                                         cl->p
1665                                 if (last_chil
1666                                         htb_p
1667                                 htb_destroy_c
1668                         }
1669                 }
1670         } while (changed);
             /* After convergence nothing should be left in the hash. */
1671         WARN_ON(nonempty);
1672 
1673         qdisc_class_hash_destroy(&q->clhash);
1674         __qdisc_reset_queue(&q->direct_queue)
1675 
             /* Let the driver drop its HTB state. */
1676         if (q->offload) {
1677                 offload_opt = (struct tc_htb_
1678                         .command = TC_HTB_DES
1679                 };
1680                 htb_offload(dev, &offload_opt
1681         }
1682 
             /* direct_qdiscs is non-NULL only if htb_attach_offload never
              * ran (init failed before attach); free what was created.
              */
1683         if (!q->direct_qdiscs)
1684                 return;
1685         for (i = 0; i < q->num_direct_qdiscs 
1686                 qdisc_put(q->direct_qdiscs[i]
1687         kfree(q->direct_qdiscs);
1688 }
1689                                                  
/* Qdisc_class_ops.delete: remove one leaf class.  Refuses classes that
 * still have children or are in use (-EBUSY).  If this is the parent's
 * last child, a replacement qdisc is created so the parent can become a
 * leaf.  Offload teardown happens before taking the tree lock; under
 * the lock the class is purged, unhashed, deactivated and unlinked from
 * the wait tree, then freed outside the lock.
 * NOTE(review): argument lists are truncated in this view.
 */
1690 static int htb_delete(struct Qdisc *sch, unsi
1691                       struct netlink_ext_ack 
1692 {
1693         struct htb_sched *q = qdisc_priv(sch)
1694         struct htb_class *cl = (struct htb_cl
1695         struct Qdisc *new_q = NULL;
1696         int last_child = 0;
1697         int err;
1698 
1699         /* TODO: why don't allow to delete su
1700          * tc subsys guarantee us that in htb
1701          * refs so that we can remove childre
1702          */
1703         if (cl->children || qdisc_class_in_us
1704                 NL_SET_ERR_MSG(extack, "HTB c
1705                 return -EBUSY;
1706         }
1707 
1708         if (!cl->level && htb_parent_last_chi
1709                 last_child = 1;
1710 
             /* Driver must agree before any software state is touched. */
1711         if (q->offload) {
1712                 err = htb_destroy_class_offlo
1713 
1714                 if (err)
1715                         return err;
1716         }
1717 
             /* Pre-create the parent's replacement leaf qdisc outside the
              * tree lock (allocation may sleep).
              */
1718         if (last_child) {
1719                 struct netdev_queue *dev_queu
1720 
1721                 if (q->offload)
1722                         dev_queue = htb_offlo
1723 
1724                 new_q = qdisc_create_dflt(dev
1725                                           cl-
1726                                           NUL
1727                 if (q->offload)
1728                         htb_parent_to_leaf_of
1729         }
1730 
1731         sch_tree_lock(sch);
1732 
1733         if (!cl->level)
1734                 qdisc_purge_queue(cl->leaf.q)
1735 
1736         /* delete from hash and active; remai
1737         qdisc_class_hash_remove(&q->clhash, &
1738         if (cl->parent)
1739                 cl->parent->children--;
1740 
1741         if (cl->prio_activity)
1742                 htb_deactivate(q, cl);
1743 
             /* A throttled class still sits in the event rbtree; unlink it. */
1744         if (cl->cmode != HTB_CAN_SEND)
1745                 htb_safe_rb_erase(&cl->pq_nod
1746                                   &q->hlevel[
1747 
1748         if (last_child)
1749                 htb_parent_to_leaf(sch, cl, n
1750 
1751         sch_tree_unlock(sch);
1752 
1753         htb_destroy_class(sch, cl);
1754         return 0;
1755 }
1756                                                  
1757 static int htb_change_class(struct Qdisc *sch    
1758                             u32 parentid, str    
1759                             unsigned long *ar    
1760 {                                                
1761         int err = -EINVAL;                       
1762         struct htb_sched *q = qdisc_priv(sch)    
1763         struct htb_class *cl = (struct htb_cl    
1764         struct tc_htb_qopt_offload offload_op    
1765         struct nlattr *opt = tca[TCA_OPTIONS]    
1766         struct nlattr *tb[TCA_HTB_MAX + 1];      
1767         struct Qdisc *parent_qdisc = NULL;       
1768         struct netdev_queue *dev_queue;          
1769         struct tc_htb_opt *hopt;                 
1770         u64 rate64, ceil64;                      
1771         int warn = 0;                            
1772                                                  
1773         /* extract all subattrs from opt attr    
1774         if (!opt)                                
1775                 goto failure;                    
1776                                                  
1777         err = nla_parse_nested_deprecated(tb,    
1778                                           ext    
1779         if (err < 0)                             
1780                 goto failure;                    
1781                                                  
1782         err = -EINVAL;                           
1783         if (tb[TCA_HTB_PARMS] == NULL)           
1784                 goto failure;                    
1785                                                  
1786         parent = parentid == TC_H_ROOT ? NULL    
1787                                                  
1788         hopt = nla_data(tb[TCA_HTB_PARMS]);      
1789         if (!hopt->rate.rate || !hopt->ceil.r    
1790                 goto failure;                    
1791                                                  
1792         if (q->offload) {                        
1793                 /* Options not supported by t    
1794                 if (hopt->rate.overhead || ho    
1795                         NL_SET_ERR_MSG(extack    
1796                         goto failure;            
1797                 }                                
1798                 if (hopt->rate.mpu || hopt->c    
1799                         NL_SET_ERR_MSG(extack    
1800                         goto failure;            
1801                 }                                
1802         }                                        
1803                                                  
1804         /* Keeping backward compatible with r    
1805         if (hopt->rate.linklayer == TC_LINKLA    
1806                 qdisc_put_rtab(qdisc_get_rtab    
1807                                                  
1808                                                  
1809         if (hopt->ceil.linklayer == TC_LINKLA    
1810                 qdisc_put_rtab(qdisc_get_rtab    
1811                                                  
1812                                                  
1813         rate64 = tb[TCA_HTB_RATE64] ? nla_get    
1814         ceil64 = tb[TCA_HTB_CEIL64] ? nla_get    
1815                                                  
1816         if (!cl) {              /* new class     
1817                 struct net_device *dev = qdis    
1818                 struct Qdisc *new_q, *old_q;     
1819                 int prio;                        
1820                 struct {                         
1821                         struct nlattr            
1822                         struct gnet_estimator    
1823                 } est = {                        
1824                         .nla = {                 
1825                                 .nla_len         
1826                                 .nla_type        
1827                         },                       
1828                         .opt = {                 
1829                                 /* 4s interva    
1830                                 .interval        
1831                                 .ewma_log        
1832                         },                       
1833                 };                               
1834                                                  
1835                 /* check for valid classid */    
1836                 if (!classid || TC_H_MAJ(clas    
1837                     htb_find(classid, sch))      
1838                         goto failure;            
1839                                                  
1840                 /* check maximal depth */        
1841                 if (parent && parent->parent     
1842                         NL_SET_ERR_MSG_MOD(ex    
1843                         goto failure;            
1844                 }                                
1845                 err = -ENOBUFS;                  
1846                 cl = kzalloc(sizeof(*cl), GFP    
1847                 if (!cl)                         
1848                         goto failure;            
1849                                                  
1850                 gnet_stats_basic_sync_init(&c    
1851                 gnet_stats_basic_sync_init(&c    
1852                                                  
1853                 err = tcf_block_get(&cl->bloc    
1854                 if (err) {                       
1855                         kfree(cl);               
1856                         goto failure;            
1857                 }                                
1858                 if (htb_rate_est || tca[TCA_R    
1859                         err = gen_new_estimat    
1860                                                  
1861                                                  
1862                                                  
1863                                                  
1864                         if (err)                 
1865                                 goto err_bloc    
1866                 }                                
1867                                                  
1868                 cl->children = 0;                
1869                 RB_CLEAR_NODE(&cl->pq_node);     
1870                                                  
1871                 for (prio = 0; prio < TC_HTB_    
1872                         RB_CLEAR_NODE(&cl->no    
1873                                                  
1874                 cl->common.classid = classid;    
1875                                                  
1876                 /* Make sure nothing interrup    
1877                  * ndo_setup_tc calls.           
1878                  */                              
1879                 ASSERT_RTNL();                   
1880                                                  
1881                 /* create leaf qdisc early be    
1882                  * so that can't be used insi    
1883                  * -- thanks to Karlis Peisen    
1884                  */                              
1885                 if (!q->offload) {               
1886                         dev_queue = sch->dev_    
1887                 } else if (!(parent && !paren    
1888                         /* Assign a dev_queue    
1889                         offload_opt = (struct    
1890                                 .command = TC    
1891                                 .classid = cl    
1892                                 .parent_class    
1893                                         TC_H_    
1894                                         TC_HT    
1895                                 .rate = max_t    
1896                                 .ceil = max_t    
1897                                 .prio = hopt-    
1898                                 .quantum = ho    
1899                                 .extack = ext    
1900                         };                       
1901                         err = htb_offload(dev    
1902                         if (err) {               
1903                                 NL_SET_ERR_MS    
1904                                                  
1905                                 goto err_kill    
1906                         }                        
1907                         dev_queue = netdev_ge    
1908                 } else { /* First child. */      
1909                         dev_queue = htb_offlo    
1910                         old_q = htb_graft_hel    
1911                         WARN_ON(old_q != pare    
1912                         offload_opt = (struct    
1913                                 .command = TC    
1914                                 .classid = cl    
1915                                 .parent_class    
1916                                         TC_H_    
1917                                 .rate = max_t    
1918                                 .ceil = max_t    
1919                                 .prio = hopt-    
1920                                 .quantum = ho    
1921                                 .extack = ext    
1922                         };                       
1923                         err = htb_offload(dev    
1924                         if (err) {               
1925                                 NL_SET_ERR_MS    
1926                                                  
1927                                 htb_graft_hel    
1928                                 goto err_kill    
1929                         }                        
1930                         _bstats_update(&paren    
1931                                        u64_st    
1932                                        u64_st    
1933                         qdisc_put(old_q);        
1934                 }                                
1935                 new_q = qdisc_create_dflt(dev    
1936                                           cla    
1937                 if (q->offload) {                
1938                         /* One ref for cl->le    
1939                         if (new_q)               
1940                                 qdisc_refcoun    
1941                         old_q = htb_graft_hel    
1942                         /* No qdisc_put neede    
1943                         WARN_ON(!(old_q->flag    
1944                 }                                
1945                 sch_tree_lock(sch);              
1946                 if (parent && !parent->level)    
1947                         /* turn parent into i    
1948                         qdisc_purge_queue(par    
1949                         parent_qdisc = parent    
1950                         if (parent->prio_acti    
1951                                 htb_deactivat    
1952                                                  
1953                         /* remove from evt li    
1954                         if (parent->cmode !=     
1955                                 htb_safe_rb_e    
1956                                 parent->cmode    
1957                         }                        
1958                         parent->level = (pare    
1959                                          : TC    
1960                         memset(&parent->inner    
1961                 }                                
1962                                                  
1963                 /* leaf (we) needs elementary    
1964                 cl->leaf.q = new_q ? new_q :     
1965                 if (q->offload)                  
1966                         cl->leaf.offload_queu    
1967                                                  
1968                 cl->parent = parent;             
1969                                                  
1970                 /* set class to be in HTB_CAN    
1971                 cl->tokens = PSCHED_TICKS2NS(    
1972                 cl->ctokens = PSCHED_TICKS2NS    
1973                 cl->mbuffer = 60ULL * NSEC_PE    
1974                 cl->t_c = ktime_get_ns();        
1975                 cl->cmode = HTB_CAN_SEND;        
1976                                                  
1977                 /* attach to the hash list an    
1978                 qdisc_class_hash_insert(&q->c    
1979                 if (parent)                      
1980                         parent->children++;      
1981                 if (cl->leaf.q != &noop_qdisc    
1982                         qdisc_hash_add(cl->le    
1983         } else {                                 
1984                 if (tca[TCA_RATE]) {             
1985                         err = gen_replace_est    
1986                                                  
1987                                                  
1988                                                  
1989                                                  
1990                         if (err)                 
1991                                 return err;      
1992                 }                                
1993                                                  
1994                 if (q->offload) {                
1995                         struct net_device *de    
1996                                                  
1997                         offload_opt = (struct    
1998                                 .command = TC    
1999                                 .classid = cl    
2000                                 .rate = max_t    
2001                                 .ceil = max_t    
2002                                 .prio = hopt-    
2003                                 .quantum = ho    
2004                                 .extack = ext    
2005                         };                       
2006                         err = htb_offload(dev    
2007                         if (err)                 
2008                                 /* Estimator     
2009                                  * as well, s    
2010                                  * the estima    
2011                                  * offload an    
2012                                  * only when     
2013                                  */              
2014                                 return err;      
2015                 }                                
2016                                                  
2017                 sch_tree_lock(sch);              
2018         }                                        
2019                                                  
2020         psched_ratecfg_precompute(&cl->rate,     
2021         psched_ratecfg_precompute(&cl->ceil,     
2022                                                  
2023         /* it used to be a nasty bug here, we    
2024          * is really leaf before changing cl-    
2025          */                                      
2026         if (!cl->level) {                        
2027                 u64 quantum = cl->rate.rate_b    
2028                                                  
2029                 do_div(quantum, q->rate2quant    
2030                 cl->quantum = min_t(u64, quan    
2031                                                  
2032                 if (!hopt->quantum && cl->qua    
2033                         warn = -1;               
2034                         cl->quantum = 1000;      
2035                 }                                
2036                 if (!hopt->quantum && cl->qua    
2037                         warn = 1;                
2038                         cl->quantum = 200000;    
2039                 }                                
2040                 if (hopt->quantum)               
2041                         cl->quantum = hopt->q    
2042                 if ((cl->prio = hopt->prio) >    
2043                         cl->prio = TC_HTB_NUM    
2044         }                                        
2045                                                  
2046         cl->buffer = PSCHED_TICKS2NS(hopt->bu    
2047         cl->cbuffer = PSCHED_TICKS2NS(hopt->c    
2048                                                  
2049         sch_tree_unlock(sch);                    
2050         qdisc_put(parent_qdisc);                 
2051                                                  
2052         if (warn)                                
2053                 NL_SET_ERR_MSG_FMT_MOD(extack    
2054                                        "quant    
2055                                        cl->co    
2056                                                  
2057         qdisc_class_hash_grow(sch, &q->clhash    
2058                                                  
2059         *arg = (unsigned long)cl;                
2060         return 0;                                
2061                                                  
2062 err_kill_estimator:                              
2063         gen_kill_estimator(&cl->rate_est);       
2064 err_block_put:                                   
2065         tcf_block_put(cl->block);                
2066         kfree(cl);                               
2067 failure:                                         
2068         return err;                              
2069 }                                                
2070                                                  
2071 static struct tcf_block *htb_tcf_block(struct    
2072                                        struct    
2073 {                                                
2074         struct htb_sched *q = qdisc_priv(sch)    
2075         struct htb_class *cl = (struct htb_cl    
2076                                                  
2077         return cl ? cl->block : q->block;        
2078 }                                                
2079                                                  
2080 static unsigned long htb_bind_filter(struct Q    
2081                                      u32 clas    
2082 {                                                
2083         struct htb_class *cl = htb_find(class    
2084                                                  
2085         /*if (cl && !cl->level) return 0;        
2086          * The line above used to be there to    
2087          * leaves. But at least tc_index filt    
2088          * for other reasons so that we have     
2089          * ----                                  
2090          * 19.6.2002 As Werner explained it i    
2091          * another way to "lock" the class -     
2092          * be broken by class during destroy     
2093          */                                      
2094         if (cl)                                  
2095                 qdisc_class_get(&cl->common);    
2096         return (unsigned long)cl;                
2097 }                                                
2098                                                  
2099 static void htb_unbind_filter(struct Qdisc *s    
2100 {                                                
2101         struct htb_class *cl = (struct htb_cl    
2102                                                  
2103         qdisc_class_put(&cl->common);            
2104 }                                                
2105                                                  
2106 static void htb_walk(struct Qdisc *sch, struc    
2107 {                                                
2108         struct htb_sched *q = qdisc_priv(sch)    
2109         struct htb_class *cl;                    
2110         unsigned int i;                          
2111                                                  
2112         if (arg->stop)                           
2113                 return;                          
2114                                                  
2115         for (i = 0; i < q->clhash.hashsize; i    
2116                 hlist_for_each_entry(cl, &q->    
2117                         if (!tc_qdisc_stats_d    
2118                                 return;          
2119                 }                                
2120         }                                        
2121 }                                                
2122                                                  
2123 static const struct Qdisc_class_ops htb_class    
2124         .select_queue   =       htb_select_qu    
2125         .graft          =       htb_graft,       
2126         .leaf           =       htb_leaf,        
2127         .qlen_notify    =       htb_qlen_noti    
2128         .find           =       htb_search,      
2129         .change         =       htb_change_cl    
2130         .delete         =       htb_delete,      
2131         .walk           =       htb_walk,        
2132         .tcf_block      =       htb_tcf_block    
2133         .bind_tcf       =       htb_bind_filt    
2134         .unbind_tcf     =       htb_unbind_fi    
2135         .dump           =       htb_dump_clas    
2136         .dump_stats     =       htb_dump_clas    
2137 };                                               
2138                                                  
2139 static struct Qdisc_ops htb_qdisc_ops __read_    
2140         .cl_ops         =       &htb_class_op    
2141         .id             =       "htb",           
2142         .priv_size      =       sizeof(struct    
2143         .enqueue        =       htb_enqueue,     
2144         .dequeue        =       htb_dequeue,     
2145         .peek           =       qdisc_peek_de    
2146         .init           =       htb_init,        
2147         .attach         =       htb_attach,      
2148         .reset          =       htb_reset,       
2149         .destroy        =       htb_destroy,     
2150         .dump           =       htb_dump,        
2151         .owner          =       THIS_MODULE,     
2152 };                                               
2153 MODULE_ALIAS_NET_SCH("htb");                     
2154                                                  
2155 static int __init htb_module_init(void)          
2156 {                                                
2157         return register_qdisc(&htb_qdisc_ops)    
2158 }                                                
2159 static void __exit htb_module_exit(void)         
2160 {                                                
2161         unregister_qdisc(&htb_qdisc_ops);        
2162 }                                                
2163                                                  
2164 module_init(htb_module_init)                     
2165 module_exit(htb_module_exit)                     
2166 MODULE_LICENSE("GPL");                           
2167 MODULE_DESCRIPTION("Hierarchical Token Bucket    
2168                                                  

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php