
TOMOYO Linux Cross Reference
Linux/net/sched/sch_htb.c


Diff markup

Differences between /net/sched/sch_htb.c (Version linux-6.11.5) and /net/sched/sch_htb.c (Version linux-2.6.32.71)


  1 // SPDX-License-Identifier: GPL-2.0-or-later   << 
  2 /*                                                  1 /*
  3  * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version             2  * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
  4  *                                                  3  *
                                                   >>   4  *              This program is free software; you can redistribute it and/or
                                                   >>   5  *              modify it under the terms of the GNU General Public License
                                                   >>   6  *              as published by the Free Software Foundation; either version
                                                   >>   7  *              2 of the License, or (at your option) any later version.
                                                   >>   8  *
  5  * Authors:     Martin Devera, <devik@cdi.cz>       9  * Authors:     Martin Devera, <devik@cdi.cz>
  6  *                                                 10  *
  7  * Credits (in time order) for older HTB versions:                               11  * Credits (in time order) for older HTB versions:
  8  *              Stef Coene <stef.coene@docum.org>                                 12  *              Stef Coene <stef.coene@docum.org>
  9  *                      HTB support at LARTC mailing list                         13  *                      HTB support at LARTC mailing list
 10  *              Ondrej Kraus, <krauso@barr.cz>                                    14  *              Ondrej Kraus, <krauso@barr.cz>
 11  *                      found missing INIT_QDISC(htb)                             15  *                      found missing INIT_QDISC(htb)
 12  *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert                      16  *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 13  *                      helped a lot to locate nasty class stall bug              17  *                      helped a lot to locate nasty class stall bug
 14  *              Andi Kleen, Jamal Hadi, Bert Hubert                               18  *              Andi Kleen, Jamal Hadi, Bert Hubert
 15  *                      code review and helpful comments on shaping               19  *                      code review and helpful comments on shaping
 16  *              Tomasz Wrona, <tw@eter.tym.pl>                                    20  *              Tomasz Wrona, <tw@eter.tym.pl>
 17  *                      created test case so that I was able to fix nasty bug     21  *                      created test case so that I was able to fix nasty bug
 18  *              Wilfried Weissmann                                                22  *              Wilfried Weissmann
 19  *                      spotted bug in dequeue code and helped with fix           23  *                      spotted bug in dequeue code and helped with fix
 20  *              Jiri Fojtasek                      24  *              Jiri Fojtasek
 21  *                      fixed requeue routine      25  *                      fixed requeue routine
 22  *              and many others. thanks.           26  *              and many others. thanks.
 23  */                                                27  */
 24 #include <linux/module.h>                          28 #include <linux/module.h>
 25 #include <linux/moduleparam.h>                     29 #include <linux/moduleparam.h>
 26 #include <linux/types.h>                           30 #include <linux/types.h>
 27 #include <linux/kernel.h>                          31 #include <linux/kernel.h>
 28 #include <linux/string.h>                          32 #include <linux/string.h>
 29 #include <linux/errno.h>                           33 #include <linux/errno.h>
 30 #include <linux/skbuff.h>                          34 #include <linux/skbuff.h>
 31 #include <linux/list.h>                            35 #include <linux/list.h>
 32 #include <linux/compiler.h>                        36 #include <linux/compiler.h>
 33 #include <linux/rbtree.h>                          37 #include <linux/rbtree.h>
 34 #include <linux/workqueue.h>                       38 #include <linux/workqueue.h>
 35 #include <linux/slab.h>                        << 
 36 #include <net/netlink.h>                           39 #include <net/netlink.h>
 37 #include <net/sch_generic.h>                   << 
 38 #include <net/pkt_sched.h>                         40 #include <net/pkt_sched.h>
 39 #include <net/pkt_cls.h>                       << 
 40                                                    41 
 41 /* HTB algorithm.                                  42 /* HTB algorithm.
 42     Author: devik@cdi.cz                           43     Author: devik@cdi.cz
 43     ========================================================================      44     ========================================================================
 44     HTB is like TBF with multiple classes. It is also similar to CBQ because     45     HTB is like TBF with multiple classes. It is also similar to CBQ because
 45     it allows to assign priority to each class in hierarchy.                     46     it allows to assign priority to each class in hierarchy.
 46     In fact it is another implementation of Floyd's formal sharing.              47     In fact it is another implementation of Floyd's formal sharing.
 47                                                                                  48 
 48     Levels:                                                                      49     Levels:
 49     Each class is assigned level. Leaf has ALWAYS level 0 and root               50     Each class is assigned level. Leaf has ALWAYS level 0 and root
 50     classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level               51     classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level
 51     one less than their parent.                                                  52     one less than their parent.
 52 */                                                 53 */
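
To make the level rule concrete, here is a minimal sketch (an editor's illustration, not code from either kernel version; toy_htb_level() is hypothetical) of the top-down numbering with leaves pinned at level 0:

#include <stdbool.h>

#define TC_HTB_MAXDEPTH 8	/* as in include/uapi/linux/pkt_sched.h */

/* A leaf is ALWAYS level 0. A non-leaf class sits one level below its
 * parent, and a class created directly under the qdisc root starts at
 * TC_HTB_MAXDEPTH - 1 (pass parent_level = -1 for "no parent").
 */
static int toy_htb_level(int parent_level, bool is_leaf)
{
	if (is_leaf)
		return 0;
	return (parent_level < 0 ? TC_HTB_MAXDEPTH : parent_level) - 1;
}
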
 53                                                    54 
 54 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */       55 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
 55 #define HTB_VER 0x30011         /* major must be matched with number supplied by TC as version */   !!  56 #define HTB_VER 0x30011         /* major must be matched with number suplied by TC as version */
 56                                                    57 
 57 #if HTB_VER >> 16 != TC_HTB_PROTOVER               58 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 58 #error "Mismatched sch_htb.c and pkt_sch.h"        59 #error "Mismatched sch_htb.c and pkt_sch.h"
 59 #endif                                             60 #endif
 60                                                    61 
 61 /* Module parameter and sysfs export */            62 /* Module parameter and sysfs export */
 62 module_param    (htb_hysteresis, int, 0640);       63 module_param    (htb_hysteresis, int, 0640);
 63 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");    64 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
 64                                                    65 
 65 static int htb_rate_est = 0; /* htb classes have a default rate estimator */                        << 
 66 module_param(htb_rate_est, int, 0640);                                                              << 
 67 MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");      << 
 68                                                << 
 69 /* used internaly to keep status of single class */                               66 /* used internaly to keep status of single class */
 70 enum htb_cmode {                                                                  67 enum htb_cmode {
 71         HTB_CANT_SEND,          /* class can't send and can't borrow */          68         HTB_CANT_SEND,          /* class can't send and can't borrow */
 72         HTB_MAY_BORROW,         /* class can't send but may borrow */            69         HTB_MAY_BORROW,         /* class can't send but may borrow */
 73         HTB_CAN_SEND            /* class can send */                             70         HTB_CAN_SEND            /* class can send */
 74 };                                                 71 };
 75                                                    72 
 76 struct htb_prio {                              !!  73 /* interior & leaf nodes; props specific to leaves are marked L: */
 77         union {                                << 
 78                 struct rb_root  row;           << 
 79                 struct rb_root  feed;          << 
 80         };                                     << 
 81         struct rb_node  *ptr;                  << 
 82         /* When class changes from state 1->2 and disconnects from            << 
 83          * parent's feed then we lost ptr value and start from the            << 
 84          * first child again. Here we store classid of the                    << 
 85          * last valid ptr (used when ptr is NULL).                            << 
 86          */                                                                   << 
 87         u32             last_ptr_id;           << 
 88 };                                             << 
 89                                                << 
 90 /* interior & leaf nodes; props specific to leaves are marked L:              << 
 91  * To reduce false sharing, place mostly read fields at beginning,            << 
 92  * and mostly written ones at the end.         << 
 93  */                                            << 
 94 struct htb_class {                                 74 struct htb_class {
 95         struct Qdisc_class_common common;          75         struct Qdisc_class_common common;
 96         struct psched_ratecfg   rate;                                          !!  76         /* general class parameters */
 97         struct psched_ratecfg   ceil;                                          !!  77         struct gnet_stats_basic_packed bstats;
 98         s64                     buffer, cbuffer;/* token bucket depth/rate */  !!  78         struct gnet_stats_queue qstats;
 99         s64                     mbuffer;        /* max wait time */            !!  79         struct gnet_stats_rate_est rate_est;
100         u32                     prio;           /* these two are used only by leaves... */  !!  80         struct tc_htb_xstats xstats;    /* our special stats */
101         int                     quantum;        /* but stored for parent-to-leaf return */  !!  81         int refcnt;             /* usage count of this class */
102                                                                                !!  82 
103         struct tcf_proto __rcu  *filter_list;   /* class attached filters */   !!  83         /* topology */
104         struct tcf_block        *block;                                        !!  84         int level;              /* our level (see above) */
105                                                                                !!  85         unsigned int children;
106         int                     level;          /* our level (see above) */    !!  86         struct htb_class *parent;       /* parent class */
107         unsigned int            children;                                      << 
108         struct htb_class        *parent;        /* parent class */             << 
109                                                                                << 
110         struct net_rate_estimator __rcu *rate_est;                             << 
111                                                                                << 
112         /*                                                                     << 
113          * Written often fields                                                << 
114          */                                                                    << 
115         struct gnet_stats_basic_sync bstats;                                   << 
116         struct gnet_stats_basic_sync bstats_bias;                              << 
117         struct tc_htb_xstats    xstats; /* our special stats */                << 
118                                                                                    87 
119         /* token bucket parameters */                                          !!  88         u32 prio;               /* these two are used only by leaves... */
120         s64                     tokens, ctokens;/* current number of tokens */ !!  89         int quantum;            /* but stored for parent-to-leaf return */
121         s64                     t_c;            /* checkpoint time */          << 
122                                                                                    90 
123         union {                                                                    91         union {
124                 struct htb_class_leaf {                                            92                 struct htb_class_leaf {
125                         int             deficit[TC_HTB_MAXDEPTH];              !!  93                         struct Qdisc *q;
126                         struct Qdisc    *q;                                    !!  94                         int deficit[TC_HTB_MAXDEPTH];
127                         struct netdev_queue *offload_queue;                    !!  95                         struct list_head drop_list;
128                 } leaf;                                                            96                 } leaf;
129                 struct htb_class_inner {                                           97                 struct htb_class_inner {
130                         struct htb_prio clprio[TC_HTB_NUMPRIO];                !!  98                         struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
                                                                                   >>  99                         struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
                                                                                   >> 100                         /* When class changes from state 1->2 and disconnects from
                                                                                   >> 101                            parent's feed then we lost ptr value and start from the
                                                                                   >> 102                            first child again. Here we store classid of the
                                                                                   >> 103                            last valid ptr (used when ptr is NULL). */
                                                                                   >> 104                         u32 last_ptr_id[TC_HTB_NUMPRIO];
131                 } inner;                                                          105                 } inner;
132         };                                                                     !! 106         } un;
133         s64                     pq_key;                                        !! 107         struct rb_node node[TC_HTB_NUMPRIO];    /* node for self or feed tree */
                                                                                   >> 108         struct rb_node pq_node; /* node for event queue */
                                                                                   >> 109         psched_time_t pq_key;
                                                                                   >> 110 
                                                                                   >> 111         int prio_activity;      /* for which prios are we active */
                                                                                   >> 112         enum htb_cmode cmode;   /* current mode of the class */
                                                                                   >> 113 
                                                                                   >> 114         /* class attached filters */
                                                                                   >> 115         struct tcf_proto *filter_list;
                                                                                   >> 116         int filter_cnt;
134                                                                                   117 
135         int                     prio_activity;  /* for which prios are we active */        !! 118         /* token bucket parameters */
136         enum htb_cmode          cmode;          /* current mode of the class */             !! 119         struct qdisc_rate_table *rate;  /* rate table of the class itself */
137         struct rb_node          pq_node;        /* node for event queue */                  !! 120         struct qdisc_rate_table *ceil;  /* ceiling rate (limits borrows too) */
138         struct rb_node          node[TC_HTB_NUMPRIO];   /* node for self or feed tree */    !! 121         long buffer, cbuffer;   /* token bucket depth/rate */
139                                                                                             !! 122         psched_tdiff_t mbuffer; /* max wait time */
140         unsigned int drops ____cacheline_aligned_in_smp;                                    !! 123         long tokens, ctokens;   /* current number of tokens */
141         unsigned int            overlimits;                                                 !! 124         psched_time_t t_c;      /* checkpoint time */
142 };                                             << 
143                                                << 
144 struct htb_level {                             << 
145         struct rb_root  wait_pq;               << 
146         struct htb_prio hprio[TC_HTB_NUMPRIO]; << 
147 };                                                125 };
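
Because the per-level and per-priority state was regrouped between these two versions, the following mapping (an editor's cheat sheet, derived from the diff itself) may help when reading the right-hand column:

/* old (linux-2.6.32.71)                 new (linux-6.11.5)
 * q->row[level][prio]             ->  q->hlevel[level].hprio[prio].row
 * q->ptr[level][prio]             ->  q->hlevel[level].hprio[prio].ptr
 * q->last_ptr_id[level][prio]     ->  q->hlevel[level].hprio[prio].last_ptr_id
 * q->wait_pq[level]               ->  q->hlevel[level].wait_pq
 * cl->un.inner.feed[prio]         ->  cl->inner.clprio[prio].feed
 * cl->un.inner.ptr[prio]          ->  cl->inner.clprio[prio].ptr
 * cl->un.inner.last_ptr_id[prio]  ->  cl->inner.clprio[prio].last_ptr_id
 */
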
148                                                   126 
149 struct htb_sched {                                127 struct htb_sched {
150         struct Qdisc_class_hash clhash;           128         struct Qdisc_class_hash clhash;
151         int                     defcls;         /* class where unclassified flows go to */  !! 129         struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
152         int                     rate2quantum;   /* quant = rate / rate2quantum */           << 
153                                                                                << 
154         /* filters for qdisc itself */                                         << 
155         struct tcf_proto __rcu  *filter_list;                                  << 
156         struct tcf_block        *block;                                        << 
157                                                                                   130 
158 #define HTB_WARN_TOOMANYEVENTS  0x1                                            !! 131         /* self list - roots of self generating tree */
159         unsigned int            warned; /* only one warning */                 !! 132         struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
160         int                     direct_qlen;                                   !! 133         int row_mask[TC_HTB_MAXDEPTH];
161         struct work_struct      work;                                          !! 134         struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
                                                                                   >> 135         u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
162                                                                                   136 
163         /* non shaped skbs; let them go directly thru */                       !! 137         /* self wait list - roots of wait PQs per row */
164         struct qdisc_skb_head   direct_queue;                                  !! 138         struct rb_root wait_pq[TC_HTB_MAXDEPTH];
165         u32                     direct_pkts;                                   << 
166         u32                     overlimits;                                    << 
167                                                                                   139 
168         struct qdisc_watchdog   watchdog;                                      !! 140         /* time of nearest event per level (row) */
                                                                                   >> 141         psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
169                                                                                   142 
170         s64                     now;    /* cached dequeue time */              !! 143         int defcls;             /* class where unclassified flows go to */
171                                                                                   144 
172         /* time of nearest event per level (row) */                            !! 145         /* filters for qdisc itself */
173         s64                     near_ev_cache[TC_HTB_MAXDEPTH];                !! 146         struct tcf_proto *filter_list;
174                                                                                   147 
175         int                     row_mask[TC_HTB_MAXDEPTH];                     !! 148         int rate2quantum;       /* quant = rate / rate2quantum */
                                                                                   >> 149         psched_time_t now;      /* cached dequeue time */
                                                                                   >> 150         struct qdisc_watchdog watchdog;
176                                                                                   151 
177         struct htb_level        hlevel[TC_HTB_MAXDEPTH];                       !! 152         /* non shaped skbs; let them go directly thru */
                                                                                   >> 153         struct sk_buff_head direct_queue;
                                                                                   >> 154         int direct_qlen;        /* max qlen of above */
178                                                                                   155 
179         struct Qdisc            **direct_qdiscs;                               !! 156         long direct_pkts;
180         unsigned int            num_direct_qdiscs;                             << 
181                                                                                   157 
182         bool                    offload;                                       !! 158 #define HTB_WARN_TOOMANYEVENTS  0x1
                                                   >> 159         unsigned int warned;    /* only one warning */
                                                   >> 160         struct work_struct work;
183 };                                                161 };
184                                                   162 
185 /* find class in global hash table using given handle */                         163 /* find class in global hash table using given handle */
186 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)          164 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
187 {                                                                                 165 {
188         struct htb_sched *q = qdisc_priv(sch);                                    166         struct htb_sched *q = qdisc_priv(sch);
189         struct Qdisc_class_common *clc;                                           167         struct Qdisc_class_common *clc;
190                                                                                   168 
191         clc = qdisc_class_find(&q->clhash, handle);                               169         clc = qdisc_class_find(&q->clhash, handle);
192         if (clc == NULL)                                                          170         if (clc == NULL)
193                 return NULL;                                                      171                 return NULL;
194         return container_of(clc, struct htb_class, common);                       172         return container_of(clc, struct htb_class, common);
195 }                                                 173 }
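
htb_find() keys the class hash on the full 32-bit classid. As a reminder of that layout (an editor's note; the TC_H_* macros live in include/uapi/linux/pkt_sched.h):

/* A classid packs "major:minor" into a u32: the qdisc major in the top
 * 16 bits, the class minor in the bottom 16. Looking up class 1:30
 * (hex minor 0x30) under qdisc 1: would therefore be:
 *
 *	struct htb_class *cl = htb_find(TC_H_MAKE(1 << 16, 0x30), sch);
 *
 * where TC_H_MAJ(h) is (h & 0xFFFF0000U) and TC_H_MIN(h) is (h & 0xFFFFU).
 */
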
196                                                   174 
197 static unsigned long htb_search(struct Qdisc *sch, u32 handle)                << 
198 {                                                                             << 
199         return (unsigned long)htb_find(handle, sch);                          << 
200 }                                              << 
201                                                << 
202 #define HTB_DIRECT ((struct htb_class *)-1L)   << 
203                                                << 
204 /**                                               175 /**
205  * htb_classify - classify a packet into class    176  * htb_classify - classify a packet into class
206  * @skb: the socket buffer                     << 
207  * @sch: the active queue discipline           << 
208  * @qerr: pointer for returned status code     << 
209  *                                                177  *
210  * It returns NULL if the packet should be dropped or -1 if the packet           178  * It returns NULL if the packet should be dropped or -1 if the packet
211  * should be passed directly thru. In all other cases leaf class is returned.    179  * should be passed directly thru. In all other cases leaf class is returned.
212  * We allow direct class selection by classid in priority. The we examine        180  * We allow direct class selection by classid in priority. The we examine
213  * filters in qdisc and in inner nodes (if higher filter points to the inner     181  * filters in qdisc and in inner nodes (if higher filter points to the inner
214  * node). If we end up with classid MAJOR:0 we enqueue the skb into special      182  * node). If we end up with classid MAJOR:0 we enqueue the skb into special
215  * internal fifo (direct). These packets then go directly thru. If we still      183  * internal fifo (direct). These packets then go directly thru. If we still
216  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful    !! 184  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
217  * then finish and return direct queue.           185  * then finish and return direct queue.
218  */                                               186  */
                                                   >> 187 #define HTB_DIRECT (struct htb_class*)-1
                                                   >> 188 
219 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,    189 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
220                                       int *qer    190                                       int *qerr)
221 {                                                 191 {
222         struct htb_sched *q = qdisc_priv(sch);    192         struct htb_sched *q = qdisc_priv(sch);
223         struct htb_class *cl;                     193         struct htb_class *cl;
224         struct tcf_result res;                    194         struct tcf_result res;
225         struct tcf_proto *tcf;                    195         struct tcf_proto *tcf;
226         int result;                               196         int result;
227                                                   197 
228         /* allow to select class by setting skb->priority to valid classid;      198         /* allow to select class by setting skb->priority to valid classid;
229          * note that nfmark can be used too by attaching filter fw with no    !! 199            note that nfmark can be used too by attaching filter fw with no
230          * rules in it                                                        !! 200            rules in it */
231          */                                                                   << 
232         if (skb->priority == sch->handle)                                         201         if (skb->priority == sch->handle)
233                 return HTB_DIRECT;      /* X:0 (direct flow) selected */          202                 return HTB_DIRECT;      /* X:0 (direct flow) selected */
234         cl = htb_find(skb->priority, sch);                                    !! 203         if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
235         if (cl) {                                                             !! 204                 return cl;
236                 if (cl->level == 0)                                           << 
237                         return cl;                                            << 
238                 /* Start with inner filter chain if a non-leaf class is selected */  << 
239                 tcf = rcu_dereference_bh(cl->filter_list);                    << 
240         } else {                                                              << 
241                 tcf = rcu_dereference_bh(q->filter_list);                     << 
242         }                                                                     << 
243                                                   205 
244         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;                             206         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
245         while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {  !! 207         tcf = q->filter_list;
                                                   >> 208         while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
246 #ifdef CONFIG_NET_CLS_ACT                         209 #ifdef CONFIG_NET_CLS_ACT
247                 switch (result) {                 210                 switch (result) {
248                 case TC_ACT_QUEUED:               211                 case TC_ACT_QUEUED:
249                 case TC_ACT_STOLEN:               212                 case TC_ACT_STOLEN:
250                 case TC_ACT_TRAP:              << 
251                         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;             213                         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
252                         fallthrough;           << 
253                 case TC_ACT_SHOT:                 214                 case TC_ACT_SHOT:
254                         return NULL;              215                         return NULL;
255                 }                                 216                 }
256 #endif                                            217 #endif
257                 cl = (void *)res.class;                                       !! 218                 if ((cl = (void *)res.class) == NULL) {
258                 if (!cl) {                                                    << 
259                         if (res.classid == sch->handle)                           219                         if (res.classid == sch->handle)
260                                 return HTB_DIRECT;      /* X:0 (direct flow) */   220                                 return HTB_DIRECT;      /* X:0 (direct flow) */
261                         cl = htb_find(res.classid, sch);                      !! 221                         if ((cl = htb_find(res.classid, sch)) == NULL)
262                         if (!cl)                                              << 
263                                 break;  /* filter selected invalid classid */     222                                 break;  /* filter selected invalid classid */
264                 }                                 223                 }
265                 if (!cl->level)                   224                 if (!cl->level)
266                         return cl;      /* we hit leaf; return it */              225                         return cl;      /* we hit leaf; return it */
267                                                                                   226 
268                 /* we have got inner class; apply inner filter chain */           227                 /* we have got inner class; apply inner filter chain */
269                 tcf = rcu_dereference_bh(cl->filter_list);                    !! 228                 tcf = cl->filter_list;
270         }                                                                         229         }
271         /* classification failed; try to use default class */                     230         /* classification failed; try to use default class */
272         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);          231         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
273         if (!cl || cl->level)                                                     232         if (!cl || cl->level)
274                 return HTB_DIRECT;      /* bad default .. this is safe bet */     233                 return HTB_DIRECT;      /* bad default .. this is safe bet */
275         return cl;                                234         return cl;
276 }                                                 235 }
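
As the function comment notes, an application can skip the filter chain entirely by setting skb->priority to a valid leaf classid. A minimal user-space sketch (an editor's example; error handling omitted, and priority values above 6 require CAP_NET_ADMIN):

#include <sys/socket.h>

/* Steer a socket's traffic straight into HTB leaf 1:30. The kernel
 * copies SO_PRIORITY into skb->priority, which htb_classify() compares
 * against class handles before consulting any tc filters.
 */
static int steer_to_htb_leaf(int sockfd)
{
	unsigned int classid = (1 << 16) | 0x30;	/* classid 1:30 */

	return setsockopt(sockfd, SOL_SOCKET, SO_PRIORITY,
			  &classid, sizeof(classid));
}
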
277                                                   236 
278 /**                                               237 /**
279  * htb_add_to_id_tree - adds class to the round robin list                       238  * htb_add_to_id_tree - adds class to the round robin list
280  * @root: the root of the tree                                                << 
281  * @cl: the class to add                                                      << 
282  * @prio: the give prio in class                                              << 
283  *                                                                                239  *
284  * Routine adds class to the list (actually tree) sorted by classid.             240  * Routine adds class to the list (actually tree) sorted by classid.
285  * Make sure that class is not already on such list for given prio.              241  * Make sure that class is not already on such list for given prio.
286  */                                                                              242  */
287 static void htb_add_to_id_tree(struct rb_root *root,                             243 static void htb_add_to_id_tree(struct rb_root *root,
288                                struct htb_class *cl, int prio)                   244                                struct htb_class *cl, int prio)
289 {                                                                                245 {
290         struct rb_node **p = &root->rb_node, *parent = NULL;                     246         struct rb_node **p = &root->rb_node, *parent = NULL;
291                                                   247 
292         while (*p) {                              248         while (*p) {
293                 struct htb_class *c;              249                 struct htb_class *c;
294                 parent = *p;                      250                 parent = *p;
295                 c = rb_entry(parent, struct htb_class, node[prio]);               251                 c = rb_entry(parent, struct htb_class, node[prio]);
296                                                   252 
297                 if (cl->common.classid > c->common.classid)                       253                 if (cl->common.classid > c->common.classid)
298                         p = &parent->rb_right;    254                         p = &parent->rb_right;
299                 else                              255                 else
300                         p = &parent->rb_left;     256                         p = &parent->rb_left;
301         }                                         257         }
302         rb_link_node(&cl->node[prio], parent, p);                                 258         rb_link_node(&cl->node[prio], parent, p);
303         rb_insert_color(&cl->node[prio], root);                                   259         rb_insert_color(&cl->node[prio], root);
304 }                                                 260 }
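
For symmetry, a lookup over this classid-ordered tree would descend it as below (an editor's sketch assuming this file's context; sch_htb.c itself never needs such a helper, since it only walks the trees via the per-prio ptr and rb_next()):

static struct htb_class *toy_find_in_id_tree(struct rb_root *root,
					     u32 classid, int prio)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct htb_class *c = rb_entry(n, struct htb_class, node[prio]);

		if (classid > c->common.classid)
			n = n->rb_right;
		else if (classid < c->common.classid)
			n = n->rb_left;
		else
			return c;
	}
	return NULL;
}
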
305                                                   261 
306 /**                                               262 /**
307  * htb_add_to_wait_tree - adds class to the event queue with delay               263  * htb_add_to_wait_tree - adds class to the event queue with delay
308  * @q: the priority event queue                << 
309  * @cl: the class to add                       << 
310  * @delay: delay in microseconds               << 
311  *                                                264  *
312  * The class is added to priority event queue to indicate that class will        265  * The class is added to priority event queue to indicate that class will
313  * change its mode in cl->pq_key microseconds. Make sure that class is not       266  * change its mode in cl->pq_key microseconds. Make sure that class is not
314  * already in the queue.                                                         267  * already in the queue.
315  */                                                                              268  */
316 static void htb_add_to_wait_tree(struct htb_sched *q,                            269 static void htb_add_to_wait_tree(struct htb_sched *q,
317                                  struct htb_class *cl, s64 delay)             !! 270                                  struct htb_class *cl, long delay)
318 {                                                                                271 {
319         struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;  !! 272         struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
320                                                   273 
321         cl->pq_key = q->now + delay;              274         cl->pq_key = q->now + delay;
322         if (cl->pq_key == q->now)                 275         if (cl->pq_key == q->now)
323                 cl->pq_key++;                     276                 cl->pq_key++;
324                                                   277 
325         /* update the nearest event cache */      278         /* update the nearest event cache */
326         if (q->near_ev_cache[cl->level] > cl->pq_key)                            279         if (q->near_ev_cache[cl->level] > cl->pq_key)
327                 q->near_ev_cache[cl->level] = cl->pq_key;                         280                 q->near_ev_cache[cl->level] = cl->pq_key;
328                                                   281 
329         while (*p) {                              282         while (*p) {
330                 struct htb_class *c;              283                 struct htb_class *c;
331                 parent = *p;                      284                 parent = *p;
332                 c = rb_entry(parent, struct htb_class, pq_node);                  285                 c = rb_entry(parent, struct htb_class, pq_node);
333                 if (cl->pq_key >= c->pq_key)      286                 if (cl->pq_key >= c->pq_key)
334                         p = &parent->rb_right;    287                         p = &parent->rb_right;
335                 else                              288                 else
336                         p = &parent->rb_left;     289                         p = &parent->rb_left;
337         }                                         290         }
338         rb_link_node(&cl->pq_node, parent, p);    291         rb_link_node(&cl->pq_node, parent, p);
339         rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);         !! 292         rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
340 }                                                 293 }
341                                                   294 
342 /**                                               295 /**
343  * htb_next_rb_node - finds next node in binary tree                             296  * htb_next_rb_node - finds next node in binary tree
344  * @n: the current node in binary tree         << 
345  *                                                297  *
346  * When we are past last key we return NULL.      298  * When we are past last key we return NULL.
347  * Average complexity is 2 steps per call.        299  * Average complexity is 2 steps per call.
348  */                                               300  */
349 static inline void htb_next_rb_node(struct rb_node **n)                          301 static inline void htb_next_rb_node(struct rb_node **n)
350 {                                                 302 {
351         *n = rb_next(*n);                         303         *n = rb_next(*n);
352 }                                                 304 }
353                                                   305 
354 /**                                               306 /**
355  * htb_add_class_to_row - add class to its row    307  * htb_add_class_to_row - add class to its row
356  * @q: the priority event queue                << 
357  * @cl: the class to add                       << 
358  * @mask: the given priorities in class in bit << 
359  *                                                308  *
360  * The class is added to row at priorities marked in mask.                       309  * The class is added to row at priorities marked in mask.
361  * It does nothing if mask == 0.                                                 310  * It does nothing if mask == 0.
362  */                                                                              311  */
363 static inline void htb_add_class_to_row(struct htb_sched *q,                     312 static inline void htb_add_class_to_row(struct htb_sched *q,
364                                         struct htb_class *cl, int mask)          313                                         struct htb_class *cl, int mask)
365 {                                                 314 {
366         q->row_mask[cl->level] |= mask;           315         q->row_mask[cl->level] |= mask;
367         while (mask) {                            316         while (mask) {
368                 int prio = ffz(~mask);            317                 int prio = ffz(~mask);
369                 mask &= ~(1 << prio);             318                 mask &= ~(1 << prio);
370                 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);  !! 319                 htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
371         }                                         320         }
372 }                                                 321 }
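
The ffz(~mask) idiom above extracts set bits lowest-first; a short worked example (editor's note):

/* mask = 0x5 (prios 0 and 2 active):
 *	ffz(~0x5) = 0 -> insert at prio 0, mask &= ~(1 << 0) -> mask = 0x4
 *	ffz(~0x4) = 2 -> insert at prio 2, mask &= ~(1 << 2) -> mask = 0
 * ffz(x) is the lowest clear bit of x, so ffz(~mask) is the lowest set
 * bit of mask, i.e. the numerically smallest prio is handled first.
 */
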
373                                                   322 
374 /* If this triggers, it is a bug in this code, but it need not be fatal */       323 /* If this triggers, it is a bug in this code, but it need not be fatal */
375 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)          324 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
376 {                                                 325 {
377         if (RB_EMPTY_NODE(rb)) {                  326         if (RB_EMPTY_NODE(rb)) {
378                 WARN_ON(1);                       327                 WARN_ON(1);
379         } else {                                  328         } else {
380                 rb_erase(rb, root);               329                 rb_erase(rb, root);
381                 RB_CLEAR_NODE(rb);                330                 RB_CLEAR_NODE(rb);
382         }                                         331         }
383 }                                                 332 }
384                                                   333 
385                                                   334 
386 /**                                               335 /**
387  * htb_remove_class_from_row - removes class from its row                        336  * htb_remove_class_from_row - removes class from its row
388  * @q: the priority event queue                << 
389  * @cl: the class to add                       << 
390  * @mask: the given priorities in class in bit << 
391  *                                                337  *
392  * The class is removed from row at priorities marked in mask.                   338  * The class is removed from row at priorities marked in mask.
393  * It does nothing if mask == 0.                                                 339  * It does nothing if mask == 0.
394  */                                                                              340  */
395 static inline void htb_remove_class_from_row(struct htb_sched *q,                341 static inline void htb_remove_class_from_row(struct htb_sched *q,
396                                              struct htb_class *cl, int mask)     342                                                  struct htb_class *cl, int mask)
397 {                                                                                343 {
398         int m = 0;                                                               344         int m = 0;
399         struct htb_level *hlevel = &q->hlevel[cl->level];                     << 
400                                                   345 
401         while (mask) {                            346         while (mask) {
402                 int prio = ffz(~mask);            347                 int prio = ffz(~mask);
403                 struct htb_prio *hprio = &hlevel->hprio[prio];                << 
404                                                   348 
405                 mask &= ~(1 << prio);             349                 mask &= ~(1 << prio);
406                 if (hprio->ptr == cl->node + prio)                            !! 350                 if (q->ptr[cl->level][prio] == cl->node + prio)
407                         htb_next_rb_node(&hprio->ptr);                        !! 351                         htb_next_rb_node(q->ptr[cl->level] + prio);
408                                                                                   352 
409                 htb_safe_rb_erase(cl->node + prio, &hprio->row);              !! 353                 htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
410                 if (!hprio->row.rb_node)                                      !! 354                 if (!q->row[cl->level][prio].rb_node)
411                         m |= 1 << prio;           355                         m |= 1 << prio;
412         }                                         356         }
413         q->row_mask[cl->level] &= ~m;             357         q->row_mask[cl->level] &= ~m;
414 }                                                 358 }
415                                                   359 
416 /**                                               360 /**
417  * htb_activate_prios - creates active classe's feed chain                       361  * htb_activate_prios - creates active classe's feed chain
418  * @q: the priority event queue                << 
419  * @cl: the class to activate                  << 
420  *                                                362  *
421  * The class is connected to ancestors and/or appropriate rows                   363  * The class is connected to ancestors and/or appropriate rows
422  * for priorities it is participating on. cl->cmode must be new                  364  * for priorities it is participating on. cl->cmode must be new
423  * (activated) mode. It does nothing if cl->prio_activity == 0.                  365  * (activated) mode. It does nothing if cl->prio_activity == 0.
424  */                                                                              366  */
425 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)        367 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
426 {                                                 368 {
427         struct htb_class *p = cl->parent;         369         struct htb_class *p = cl->parent;
428         long m, mask = cl->prio_activity;         370         long m, mask = cl->prio_activity;
429                                                   371 
430         while (cl->cmode == HTB_MAY_BORROW && p && mask) {                        372         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
431                 m = mask;                         373                 m = mask;
432                 while (m) {                       374                 while (m) {
433                         unsigned int prio = ffz(~m);                           !! 375                         int prio = ffz(~m);
434                                                                                << 
435                         if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio))) << 
436                                 break;                                         << 
437                         m &= ~(1 << prio);                                         376                         m &= ~(1 << prio);
438                                                                                    377 
439                         if (p->inner.clprio[prio].feed.rb_node)                !! 378                         if (p->un.inner.feed[prio].rb_node)
440                                 /* parent already has its feed in use so that      379                                 /* parent already has its feed in use so that
441                                  * reset bit in mask as parent is already ok   !! 380                                    reset bit in mask as parent is already ok */
442                                  */                                            << 
443                                 mask &= ~(1 << prio);                              381                                 mask &= ~(1 << prio);
444                                                                                    382 
445                         htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);  !! 383                         htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
446                 }                                 384                 }
447                 p->prio_activity |= mask;         385                 p->prio_activity |= mask;
448                 cl = p;                           386                 cl = p;
449                 p = cl->parent;                   387                 p = cl->parent;
450                                                   388 
451         }                                         389         }
452         if (cl->cmode == HTB_CAN_SEND && mask)    390         if (cl->cmode == HTB_CAN_SEND && mask)
453                 htb_add_class_to_row(q, cl, mask);                                391                 htb_add_class_to_row(q, cl, mask);
454 }                                                 392 }
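
Put concretely, the loop climbs the hierarchy only while classes are in HTB_MAY_BORROW, and the first ancestor that can send in its own right lands in the row for its level (an editor's illustration):

/* Leaf 1:10 (prio 0) over rate but under ceil, parent 1:1 within rate:
 *
 *	1:10 is HTB_MAY_BORROW -> linked into 1:1's inner feed for prio 0
 *	1:1  is HTB_CAN_SEND   -> linked into q->hlevel[1].hprio[0].row
 *
 * Dequeue later starts from that row entry and follows the feed trees
 * down to an active leaf.
 */
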
455                                                   393 
456 /**                                               394 /**
457  * htb_deactivate_prios - remove class from feed chain                           395  * htb_deactivate_prios - remove class from feed chain
458  * @q: the priority event queue                << 
459  * @cl: the class to deactivate                << 
460  *                                                396  *
461  * cl->cmode must represent old mode (before deactivation). It does              397  * cl->cmode must represent old mode (before deactivation). It does
462  * nothing if cl->prio_activity == 0. Class is removed from all feed             398  * nothing if cl->prio_activity == 0. Class is removed from all feed
463  * chains and rows.                                                              399  * chains and rows.
464  */                                                                              400  */
465 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)      401 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
466 {                                                 402 {
467         struct htb_class *p = cl->parent;         403         struct htb_class *p = cl->parent;
468         long m, mask = cl->prio_activity;         404         long m, mask = cl->prio_activity;
469                                                   405 
470         while (cl->cmode == HTB_MAY_BORROW && p && mask) {                        406         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
471                 m = mask;                         407                 m = mask;
472                 mask = 0;                         408                 mask = 0;
473                 while (m) {                       409                 while (m) {
474                         int prio = ffz(~m);       410                         int prio = ffz(~m);
475                         m &= ~(1 << prio);        411                         m &= ~(1 << prio);
476                                                   412 
477                         if (p->inner.clprio[prio].ptr == cl->node + prio) {   !! 413                         if (p->un.inner.ptr[prio] == cl->node + prio) {
478                                 /* we are removing child which is pointed to from  414                                 /* we are removing child which is pointed to from
479                                  * parent feed - forget the pointer but remember   !! 415                                    parent feed - forget the pointer but remember
480                                  * classid                                    !! 416                                    classid */
481                                  */                                           << 
482                                 p->inner.clprio[prio].last_ptr_id = cl->common.classid;  !! 417                                 p->un.inner.last_ptr_id[prio] = cl->common.classid;
483                                 p->inner.clprio[prio].ptr = NULL;             !! 418                                 p->un.inner.ptr[prio] = NULL;
484                         }                                                         419                         }
485                                                                                   420 
486                         htb_safe_rb_erase(cl->node + prio,                    !! 421                         htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
487                                           &p->inner.clprio[prio].feed);       << 
488                                                                                   422 
489                         if (!p->inner.clprio[prio].feed.rb_node)              !! 423                         if (!p->un.inner.feed[prio].rb_node)
490                                 mask |= 1 << p    424                                 mask |= 1 << prio;
491                 }                                 425                 }
492                                                   426 
493                 p->prio_activity &= ~mask;        427                 p->prio_activity &= ~mask;
494                 cl = p;                           428                 cl = p;
495                 p = cl->parent;                   429                 p = cl->parent;
496                                                   430 
497         }                                         431         }
498         if (cl->cmode == HTB_CAN_SEND && mask)    432         if (cl->cmode == HTB_CAN_SEND && mask)
499                 htb_remove_class_from_row(q, cl, mask);                           433                 htb_remove_class_from_row(q, cl, mask);
500 }                                                 434 }
501                                                   435 
502 static inline s64 htb_lowater(const struct htb_class *cl)                     !! 436 static inline long htb_lowater(const struct htb_class *cl)
503 {                                                                                 437 {
504         if (htb_hysteresis)                                                       438         if (htb_hysteresis)
505                 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;             439                 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
506         else                                                                      440         else
507                 return 0;                                                         441                 return 0;
508 }                                                                                 442 }
509 static inline s64 htb_hiwater(const struct htb_class *cl)                     !! 443 static inline long htb_hiwater(const struct htb_class *cl)
510 {                                                                                 444 {
511         if (htb_hysteresis)                                                       445         if (htb_hysteresis)
512                 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;               446                 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
513         else                                      447         else
514                 return 0;                         448                 return 0;
515 }                                                 449 }
516                                                   450 
517                                                   451 
518 /**                                               452 /**
519  * htb_class_mode - computes and returns current class mode                      453  * htb_class_mode - computes and returns current class mode
520  * @cl: the target class                                                      << 
521  * @diff: diff time in microseconds                                           << 
522  *                                                                                454  *
523  * It computes cl's mode at time cl->t_c+diff and returns it. If mode            455  * It computes cl's mode at time cl->t_c+diff and returns it. If mode
524  * is not HTB_CAN_SEND then cl->pq_key is updated to time difference             456  * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
525  * from now to time when cl will change its state.                               457  * from now to time when cl will change its state.
526  * Also it is worth to note that class mode doesn't change simply                458  * Also it is worth to note that class mode doesn't change simply
527  * at cl->{c,}tokens == 0 but there can rather be hysteresis of                  459  * at cl->{c,}tokens == 0 but there can rather be hysteresis of
528  * 0 .. -cl->{c,}buffer range. It is meant to limit number of                    460  * 0 .. -cl->{c,}buffer range. It is meant to limit number of
529  * mode transitions per time unit. The speed gain is about 1/6.                  461  * mode transitions per time unit. The speed gain is about 1/6.
530  */                                               462  */
531 static inline enum htb_cmode                      463 static inline enum htb_cmode
532 htb_class_mode(struct htb_class *cl, s64 *diff)                               !! 464 htb_class_mode(struct htb_class *cl, long *diff)
533 {                                                 465 {
534         s64 toks;                              !! 466         long toks;
535                                                   467 
536         if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {                   468         if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
537                 *diff = -toks;                    469                 *diff = -toks;
538                 return HTB_CANT_SEND;             470                 return HTB_CANT_SEND;
539         }                                         471         }
540                                                   472 
541         if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))                     473         if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
542                 return HTB_CAN_SEND;              474                 return HTB_CAN_SEND;
543                                                   475 
544         *diff = -toks;                            476         *diff = -toks;
545         return HTB_MAY_BORROW;                    477         return HTB_MAY_BORROW;
546 }                                                 478 }
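
A compact restatement of the decision above (an editor's note; with htb_hysteresis left at 0 both water marks are 0):

/* *diff is the time elapsed since cl->t_c, in the same units as tokens:
 *	cl->ctokens + *diff <  0  ->  HTB_CANT_SEND   (over ceil)
 *	cl->tokens  + *diff >= 0  ->  HTB_CAN_SEND    (within rate)
 *	otherwise                 ->  HTB_MAY_BORROW  (over rate, under ceil)
 * With htb_hysteresis=1 the thresholds move to -cl->cbuffer and
 * -cl->buffer (depending on the current mode), so a class must recover
 * a burst's worth of tokens before flipping back; that is the "about
 * 1/6" speed gain the comment mentions.
 */
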
547                                                   479 
548 /**                                               480 /**
549  * htb_change_class_mode - changes classe's mode                                 481  * htb_change_class_mode - changes classe's mode
550  * @q: the priority event queue                << 
551  * @cl: the target class                       << 
552  * @diff: diff time in microseconds            << 
553  *                                                482  *
554  * This should be the only way how to change classe's mode under normal          483  * This should be the only way how to change classe's mode under normal
555  * circumstances. Routine will update feed lists linkage, change mode         !! 484  * cirsumstances. Routine will update feed lists linkage, change mode
556  * and add class to the wait event queue if appropriate. New mode should         485  * and add class to the wait event queue if appropriate. New mode should
557  * be different from old one and cl->pq_key has to be valid if changing          486  * be different from old one and cl->pq_key has to be valid if changing
558  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).                   487  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
559  */                                               488  */
560 static void                                       489 static void
561 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)   !! 490 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
562 {                                                 491 {
563         enum htb_cmode new_mode = htb_class_mode(cl, diff);                       492         enum htb_cmode new_mode = htb_class_mode(cl, diff);
564                                                   493 
565         if (new_mode == cl->cmode)                494         if (new_mode == cl->cmode)
566                 return;                           495                 return;
567                                                   496 
568         if (new_mode == HTB_CANT_SEND) {       << 
569                 cl->overlimits++;              << 
570                 q->overlimits++;               << 
571         }                                      << 
572                                                << 
573         if (cl->prio_activity) {        /* not necessary: speed optimization */  497         if (cl->prio_activity) {        /* not necessary: speed optimization */
574                 if (cl->cmode != HTB_CANT_SEND)                                   498                 if (cl->cmode != HTB_CANT_SEND)
575                         htb_deactivate_prios(q, cl);                              499                         htb_deactivate_prios(q, cl);
576                 cl->cmode = new_mode;                                             500                 cl->cmode = new_mode;
577                 if (new_mode != HTB_CANT_SEND)                                    501                 if (new_mode != HTB_CANT_SEND)
578                         htb_activate_prios(q, cl);                                502                         htb_activate_prios(q, cl);
579         } else                                    503         } else
580                 cl->cmode = new_mode;             504                 cl->cmode = new_mode;
581 }                                                 505 }
582                                                   506 
583 /**                                               507 /**
584  * htb_activate - inserts leaf cl into appropriate active feeds                  508  * htb_activate - inserts leaf cl into appropriate active feeds
585  * @q: the priority event queue                                               << 
586  * @cl: the target class                                                      << 
587  *                                                                                509  *
588  * Routine learns (new) priority of leaf and activates feed chain                510  * Routine learns (new) priority of leaf and activates feed chain
589  * for the prio. It can be called on already active leaf safely.                 511  * for the prio. It can be called on already active leaf safely.
590  * It also adds leaf into droplist.                                              512  * It also adds leaf into droplist.
591  */                                                                              513  */
592 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)       514 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
593 {                                                                                515 {
594         WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);             !! 516         WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
595                                                                                   517 
596         if (!cl->prio_activity) {                                                 518         if (!cl->prio_activity) {
597                 cl->prio_activity = 1 << cl->prio;                                519                 cl->prio_activity = 1 << cl->prio;
598                 htb_activate_prios(q, cl);        520                 htb_activate_prios(q, cl);
                                                   >> 521                 list_add_tail(&cl->un.leaf.drop_list,
                                                   >> 522                               q->drops + cl->prio);
599         }                                         523         }
600 }                                                 524 }
601                                                   525 
602 /**                                               526 /**
603  * htb_deactivate - remove leaf cl from active    527  * htb_deactivate - remove leaf cl from active feeds
604  * @q: the priority event queue                << 
605  * @cl: the target class                       << 
606  *                                                528  *
607  * Make sure that leaf is active. In other words it can't be called       529  * Make sure that leaf is active. In other words it can't be called
608  * with non-active leaf. It also removes class    530  * with non-active leaf. It also removes class from the drop list.
609  */                                               531  */
610 static inline void htb_deactivate(struct htb_s    532 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
611 {                                                 533 {
612         WARN_ON(!cl->prio_activity);              534         WARN_ON(!cl->prio_activity);
613                                                   535 
614         htb_deactivate_prios(q, cl);              536         htb_deactivate_prios(q, cl);
615         cl->prio_activity = 0;                    537         cl->prio_activity = 0;
                                                   >> 538         list_del_init(&cl->un.leaf.drop_list);
616 }                                                 539 }
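
Note how prio_activity doubles as the "already linked" flag, which is what makes htb_activate() safe on an already-active leaf. A toy model of that idempotence (illustrative names only):

#include <assert.h>
#include <stdio.h>

struct toy_leaf { unsigned int prio; unsigned int prio_activity; };

/* Activation is idempotent: prio_activity != 0 means the feed chain was
 * already linked, so a second call is a no-op. */
static void toy_activate(struct toy_leaf *cl)
{
	if (!cl->prio_activity)
		cl->prio_activity = 1u << cl->prio;	/* one bit per priority */
}

static void toy_deactivate(struct toy_leaf *cl)
{
	assert(cl->prio_activity);	/* caller must guarantee it is active */
	cl->prio_activity = 0;
}

int main(void)
{
	struct toy_leaf cl = { .prio = 3 };

	toy_activate(&cl);
	toy_activate(&cl);		/* safe: already active */
	printf("activity mask = 0x%x\n", cl.prio_activity);	/* 0x8 */
	toy_deactivate(&cl);
	return 0;
}
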
617                                                   540 
618 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,  !! 541 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
619                        struct sk_buff **to_free)               << 
620 {                                                 542 {
621         int ret;                               !! 543         int uninitialized_var(ret);
622         unsigned int len = qdisc_pkt_len(skb); << 
623         struct htb_sched *q = qdisc_priv(sch);    544         struct htb_sched *q = qdisc_priv(sch);
624         struct htb_class *cl = htb_classify(sk    545         struct htb_class *cl = htb_classify(skb, sch, &ret);
625                                                   546 
626         if (cl == HTB_DIRECT) {                   547         if (cl == HTB_DIRECT) {
627                 /* enqueue to helper queue */     548                 /* enqueue to helper queue */
628                 if (q->direct_queue.qlen < q->    549                 if (q->direct_queue.qlen < q->direct_qlen) {
629                         __qdisc_enqueue_tail(skb, &q->direct_queue);  !! 550                         __skb_queue_tail(&q->direct_queue, skb);
630                         q->direct_pkts++;         551                         q->direct_pkts++;
631                 } else {                          552                 } else {
632                         return qdisc_drop(skb, sch, to_free);  !! 553                         kfree_skb(skb);
                                                   >> 554                         sch->qstats.drops++;
                                                   >> 555                         return NET_XMIT_DROP;
633                 }                                 556                 }
634 #ifdef CONFIG_NET_CLS_ACT                         557 #ifdef CONFIG_NET_CLS_ACT
635         } else if (!cl) {                         558         } else if (!cl) {
636                 if (ret & __NET_XMIT_BYPASS)      559                 if (ret & __NET_XMIT_BYPASS)
637                         qdisc_qstats_drop(sch);        !! 560                         sch->qstats.drops++;
638                 __qdisc_drop(skb, to_free);    !! 561                 kfree_skb(skb);
639                 return ret;                       562                 return ret;
640 #endif                                            563 #endif
641         } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,  !! 564         } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
642                                         to_free)) != NET_XMIT_SUCCESS) {  << 
643                 if (net_xmit_drop_count(ret))     565                 if (net_xmit_drop_count(ret)) {
644                         qdisc_qstats_drop(sch);        !! 566                         sch->qstats.drops++;
645                         cl->drops++;           !! 567                         cl->qstats.drops++;
646                 }                                 568                 }
647                 return ret;                       569                 return ret;
648         } else {                                  570         } else {
                                                   >> 571                 cl->bstats.packets +=
                                                   >> 572                         skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
                                                   >> 573                 cl->bstats.bytes += qdisc_pkt_len(skb);
649                 htb_activate(q, cl);              574                 htb_activate(q, cl);
650         }                                         575         }
651                                                   576 
652         sch->qstats.backlog += len;            << 
653         sch->q.qlen++;                            577         sch->q.qlen++;
                                                   >> 578         sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
                                                   >> 579         sch->bstats.bytes += qdisc_pkt_len(skb);
654         return NET_XMIT_SUCCESS;                  580         return NET_XMIT_SUCCESS;
655 }                                                 581 }
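
The function above has three outcomes: a packet classified as HTB_DIRECT goes to the bounded direct queue, a classification failure drops, and everything else lands in the leaf's own qdisc followed by htb_activate(). A standalone sketch of just the direct-queue branch, with toy_* types standing in for the kernel structures (illustrative only):

#include <stdio.h>

enum { TOY_OK, TOY_DROP };

struct toy_fifo { unsigned int qlen, limit; };

static int toy_enqueue(struct toy_fifo *direct, int classified_direct)
{
	if (classified_direct) {
		if (direct->qlen < direct->limit) {
			direct->qlen++;	/* filters said "direct": bypass shaping */
			return TOY_OK;
		}
		return TOY_DROP;	/* direct queue full */
	}
	/* otherwise the packet would go to the leaf qdisc and the class
	 * would be (re)activated so dequeue visits it */
	return TOY_OK;
}

int main(void)
{
	struct toy_fifo direct = { .qlen = 0, .limit = 2 };

	for (int i = 0; i < 3; i++)
		printf("pkt %d -> %s\n", i,
		       toy_enqueue(&direct, 1) == TOY_OK ? "queued" : "dropped");
	return 0;
}
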
656                                                   582 
657 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)  !! 583 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
658 {                                                 584 {
659         s64 toks = diff + cl->tokens;          !! 585         long toks = diff + cl->tokens;
660                                                   586 
661         if (toks > cl->buffer)                    587         if (toks > cl->buffer)
662                 toks = cl->buffer;                588                 toks = cl->buffer;
663         toks -= (s64) psched_l2t_ns(&cl->rate, bytes);  !! 589         toks -= (long) qdisc_l2t(cl->rate, bytes);
664         if (toks <= -cl->mbuffer)                 590         if (toks <= -cl->mbuffer)
665                 toks = 1 - cl->mbuffer;           591                 toks = 1 - cl->mbuffer;
666                                                   592 
667         cl->tokens = toks;                        593         cl->tokens = toks;
668 }                                                 594 }
669                                                   595 
670 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)  !! 596 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
671 {                                                 597 {
672         s64 toks = diff + cl->ctokens;         !! 598         long toks = diff + cl->ctokens;
673                                                   599 
674         if (toks > cl->cbuffer)                   600         if (toks > cl->cbuffer)
675                 toks = cl->cbuffer;               601                 toks = cl->cbuffer;
676         toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);  !! 602         toks -= (long) qdisc_l2t(cl->ceil, bytes);
677         if (toks <= -cl->mbuffer)                 603         if (toks <= -cl->mbuffer)
678                 toks = 1 - cl->mbuffer;           604                 toks = 1 - cl->mbuffer;
679                                                   605 
680         cl->ctokens = toks;                       606         cl->ctokens = toks;
681 }                                                 607 }
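
Both helpers implement the same clamped update: earn diff worth of tokens for elapsed time, cap at the configured burst, pay the wire time of the packet, and bound the debt at mbuffer. A userspace sketch where l2t_ns() is an assumed stand-in for the kernel's psched_l2t_ns() and all rates and values are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Time (ns) to transmit "bytes" at "rate_bps" bytes/sec. */
static int64_t l2t_ns(int64_t rate_bps, int bytes)
{
	return (int64_t)bytes * 1000000000LL / rate_bps;
}

static int64_t accnt_tokens(int64_t tokens, int64_t diff_ns,
			    int64_t buffer_ns, int64_t mbuffer_ns,
			    int64_t rate_bps, int bytes)
{
	int64_t toks = diff_ns + tokens;	/* earn tokens for elapsed time */

	if (toks > buffer_ns)			/* cap the burst */
		toks = buffer_ns;
	toks -= l2t_ns(rate_bps, bytes);	/* pay for this packet */
	if (toks <= -mbuffer_ns)		/* bound the debt */
		toks = 1 - mbuffer_ns;
	return toks;
}

int main(void)
{
	/* 1500-byte packet at 125000 bytes/s (1 Mbit/s) costs 12 ms of tokens */
	int64_t toks = accnt_tokens(0, 2000000, 10000000, 60000000,
				    125000, 1500);
	printf("tokens after update: %lld ns\n", (long long)toks);
	return 0;
}
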
682                                                   608 
683 /**                                               609 /**
684  * htb_charge_class - charges amount "bytes" to leaf and ancestors        610  * htb_charge_class - charges amount "bytes" to leaf and ancestors
685  * @q: the priority event queue                << 
686  * @cl: the class to start iterate             << 
687  * @level: the minimum level to account        << 
688  * @skb: the socket buffer                     << 
689  *                                                611  *
690  * Routine assumes that packet "bytes" long was dequeued from leaf cl     612  * Routine assumes that packet "bytes" long was dequeued from leaf cl
691  * borrowing from "level". It accounts bytes to ceil leaky bucket for     613  * borrowing from "level". It accounts bytes to ceil leaky bucket for
692  * leaf and all ancestors and to rate bucket for ancestors at levels      614  * leaf and all ancestors and to rate bucket for ancestors at levels
693  * "level" and higher. It also handles possible change of mode resulting  615  * "level" and higher. It also handles possible change of mode resulting
694  * from the update. Note that mode can also increase here (MAY_BORROW to  616  * from the update. Note that mode can also increase here (MAY_BORROW to
695  * CAN_SEND) because we can use more precise clock than event queue here. 617  * CAN_SEND) because we can use more precise clock than event queue here.
696  * In such case we remove class from event queue first.                   618  * In such case we remove class from event queue first.
697  */                                               619  */
698 static void htb_charge_class(struct htb_sched     620 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
699                              int level, struct    621                              int level, struct sk_buff *skb)
700 {                                                 622 {
701         int bytes = qdisc_pkt_len(skb);           623         int bytes = qdisc_pkt_len(skb);
702         enum htb_cmode old_mode;                  624         enum htb_cmode old_mode;
703         s64 diff;                              !! 625         long diff;
704                                                   626 
705         while (cl) {                              627         while (cl) {
706                 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);  !! 628                 diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
707                 if (cl->level >= level) {         629                 if (cl->level >= level) {
708                         if (cl->level == level    630                         if (cl->level == level)
709                                 cl->xstats.len    631                                 cl->xstats.lends++;
710                         htb_accnt_tokens(cl, b    632                         htb_accnt_tokens(cl, bytes, diff);
711                 } else {                          633                 } else {
712                         cl->xstats.borrows++;     634                         cl->xstats.borrows++;
713                         cl->tokens += diff;       635                         cl->tokens += diff;     /* we moved t_c; update tokens */
714                 }                                 636                 }
715                 htb_accnt_ctokens(cl, bytes, d    637                 htb_accnt_ctokens(cl, bytes, diff);
716                 cl->t_c = q->now;                 638                 cl->t_c = q->now;
717                                                   639 
718                 old_mode = cl->cmode;             640                 old_mode = cl->cmode;
719                 diff = 0;                         641                 diff = 0;
720                 htb_change_class_mode(q, cl, &    642                 htb_change_class_mode(q, cl, &diff);
721                 if (old_mode != cl->cmode) {      643                 if (old_mode != cl->cmode) {
722                         if (old_mode != HTB_CA    644                         if (old_mode != HTB_CAN_SEND)
723                                 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);  !! 645                                 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
724                         if (cl->cmode != HTB_C    646                         if (cl->cmode != HTB_CAN_SEND)
725                                 htb_add_to_wai    647                                 htb_add_to_wait_tree(q, cl, diff);
726                 }                                 648                 }
727                                                   649 
728                 /* update basic stats except for leaves which are already updated */  !! 650                 /* update byte stats except for leaves which are already updated */
729                 if (cl->level)                         !! 651                 if (cl->level) {
730                         bstats_update(&cl->bstats, skb);  !! 652                         cl->bstats.bytes += bytes;
731                                                        !! 653                         cl->bstats.packets += skb_is_gso(skb)?
                                                           >> 654                                         skb_shinfo(skb)->gso_segs:1;
                                                           >> 655                 }
732                 cl = cl->parent;                  656                 cl = cl->parent;
733         }                                         657         }
734 }                                                 658 }
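
A toy walk over a three-level hierarchy showing where lends, borrows and rate charges land when a leaf borrows from level 1; field names are illustrative, only the level comparisons mirror the loop above:

#include <stdio.h>

struct toy_class {
	int level;
	long lends, borrows, rate_charges;
	struct toy_class *parent;
};

static void toy_charge(struct toy_class *cl, int level)
{
	while (cl) {
		if (cl->level >= level) {
			if (cl->level == level)
				cl->lends++;	/* this level supplied the tokens */
			cl->rate_charges++;	/* pays into its rate bucket */
		} else {
			cl->borrows++;		/* below: only a borrow recorded */
		}
		/* the ceil bucket would be charged at every level */
		cl = cl->parent;
	}
}

int main(void)
{
	struct toy_class root = { .level = 2 };
	struct toy_class mid  = { .level = 1, .parent = &root };
	struct toy_class leaf = { .level = 0, .parent = &mid };

	toy_charge(&leaf, 1);	/* leaf sent a packet borrowing from level 1 */
	printf("leaf borrows=%ld mid lends=%ld root rate_charges=%ld\n",
	       leaf.borrows, mid.lends, root.rate_charges);
	return 0;
}
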
735                                                   659 
736 /**                                               660 /**
737  * htb_do_events - make mode changes to classes at the level              661  * htb_do_events - make mode changes to classes at the level
738  * @q: the priority event queue                << 
739  * @level: which wait_pq in 'q->hlevel'        << 
740  * @start: start jiffies                       << 
741  *                                                662  *
742  * Scans event queue for pending events and applies them. Returns time of 663  * Scans event queue for pending events and applies them. Returns time of
743  * next pending event (0 for no event in pq, q->now for too many events). 664  * next pending event (0 for no event in pq, q->now for too many events).
744  * Note: Applied are events which have cl->pq_key <= q->now.              665  * Note: Applied are events which have cl->pq_key <= q->now.
745  */                                               666  */
746 static s64 htb_do_events(struct htb_sched *q, const int level,  !! 667 static psched_time_t htb_do_events(struct htb_sched *q, int level,
747                          unsigned long start)                  !! 668                                    unsigned long start)
748 {                                                 669 {
749         /* don't run for longer than 2 jiffies; 2 is used instead of      670         /* don't run for longer than 2 jiffies; 2 is used instead of
750          * 1 to simplify things when jiffy is going to be incremented  !! 671            1 to simplify things when jiffy is going to be incremented
751          * too soon                                    !! 672            too soon */
752          */                                            << 
753         unsigned long stop_at = start + 2;                673         unsigned long stop_at = start + 2;
754         struct rb_root *wait_pq = &q->hlevel[level].wait_pq;  << 
755                                                        << 
756         while (time_before(jiffies, stop_at))     674         while (time_before(jiffies, stop_at)) {
757                 struct htb_class *cl;             675                 struct htb_class *cl;
758                 s64 diff;                      !! 676                 long diff;
759                 struct rb_node *p = rb_first(wait_pq);  !! 677                 struct rb_node *p = rb_first(&q->wait_pq[level]);
760                                                   678 
761                 if (!p)                           679                 if (!p)
762                         return 0;                 680                         return 0;
763                                                   681 
764                 cl = rb_entry(p, struct htb_cl    682                 cl = rb_entry(p, struct htb_class, pq_node);
765                 if (cl->pq_key > q->now)          683                 if (cl->pq_key > q->now)
766                         return cl->pq_key;        684                         return cl->pq_key;
767                                                   685 
768                 htb_safe_rb_erase(p, wait_pq); !! 686                 htb_safe_rb_erase(p, q->wait_pq + level);
769                 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);  !! 687                 diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
770                 htb_change_class_mode(q, cl, &    688                 htb_change_class_mode(q, cl, &diff);
771                 if (cl->cmode != HTB_CAN_SEND)    689                 if (cl->cmode != HTB_CAN_SEND)
772                         htb_add_to_wait_tree(q    690                         htb_add_to_wait_tree(q, cl, diff);
773         }                                         691         }
774                                                   692 
775         /* too much load - let's continue afte    693         /* too much load - let's continue after a break for scheduling */
776         if (!(q->warned & HTB_WARN_TOOMANYEVEN    694         if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
777                 pr_warn("htb: too many events!\n");    !! 695                 printk(KERN_WARNING "htb: too many events!\n");
778                 q->warned |= HTB_WARN_TOOMANYE    696                 q->warned |= HTB_WARN_TOOMANYEVENTS;
779         }                                         697         }
780                                                   698 
781         return q->now;                            699         return q->now;
782 }                                                 700 }
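
The three-way return convention (0 for an empty queue, the next pq_key when nothing is due yet, q->now when the work budget ran out) can be sketched with a sorted array standing in for the rb-tree (an assumed simplification, not the kernel structure):

#include <stdio.h>
#include <stddef.h>

static long long do_events(const long long *keys, size_t n, size_t *idx,
			   long long now, int budget)
{
	while (budget-- > 0) {
		if (*idx >= n)
			return 0;		/* no pending events at all */
		if (keys[*idx] > now)
			return keys[*idx];	/* next event is in the future */
		(*idx)++;			/* apply the pending event */
	}
	return now;				/* too much work: resume soon */
}

int main(void)
{
	long long keys[] = { 10, 20, 1000 };
	size_t idx = 0;

	/* events 10 and 20 are due at now=50; 1000 is returned as "next" */
	printf("%lld\n", do_events(keys, 3, &idx, 50, 16));
	return 0;
}
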
783                                                   701 
784 /* Returns class->node+prio from id-tree where class's id is >= id. NULL  702 /* Returns class->node+prio from id-tree where class's id is >= id. NULL
785  * if no such one exists.                      !! 703    if no such one exists. */
786  */                                            << 
787 static struct rb_node *htb_id_find_next_upper(    704 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
788                                                   705                                               u32 id)
789 {                                                 706 {
790         struct rb_node *r = NULL;                 707         struct rb_node *r = NULL;
791         while (n) {                               708         while (n) {
792                 struct htb_class *cl =            709                 struct htb_class *cl =
793                     rb_entry(n, struct htb_cla    710                     rb_entry(n, struct htb_class, node[prio]);
794                                                   711 
795                 if (id > cl->common.classid) {    712                 if (id > cl->common.classid) {
796                         n = n->rb_right;          713                         n = n->rb_right;
797                 } else if (id < cl->common.cla    714                 } else if (id < cl->common.classid) {
798                         r = n;                    715                         r = n;
799                         n = n->rb_left;           716                         n = n->rb_left;
800                 } else {                          717                 } else {
801                         return n;                 718                         return n;
802                 }                                 719                 }
803         }                                         720         }
804         return r;                                 721         return r;
805 }                                                 722 }
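
The same "smallest id >= target" descent works on any ordered binary tree; a self-contained sketch using a plain BST in place of the kernel rb-tree, used here the way htb_lookup_leaf uses it to re-find a feed position after the cached pointer was invalidated:

#include <stdio.h>

struct node { unsigned int id; struct node *left, *right; };

static struct node *find_next_upper(struct node *n, unsigned int id)
{
	struct node *r = NULL;

	while (n) {
		if (id > n->id) {
			n = n->right;	/* everything here is too small */
		} else if (id < n->id) {
			r = n;		/* candidate; try to find a smaller one */
			n = n->left;
		} else {
			return n;	/* exact match */
		}
	}
	return r;
}

int main(void)
{
	struct node n30 = { 30, NULL, NULL }, n10 = { 10, NULL, NULL };
	struct node n20 = { 20, &n10, &n30 };
	struct node *r = find_next_upper(&n20, 15);

	printf("next id >= 15 is %u\n", r ? r->id : 0);	/* prints 20 */
	return 0;
}
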
806                                                   723 
807 /**                                               724 /**
808  * htb_lookup_leaf - returns next leaf class i    725  * htb_lookup_leaf - returns next leaf class in DRR order
809  * @hprio: the current one                     << 
810  * @prio: which prio in class                  << 
811  *                                                726  *
812  * Find leaf where current feed pointers point to.        727  * Find leaf where current feed pointers point to.
813  */                                               728  */
814 static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)  !! 729 static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                                                   >> 730                                          struct rb_node **pptr, u32 * pid)
815 {                                                 731 {
816         int i;                                    732         int i;
817         struct {                                  733         struct {
818                 struct rb_node *root;             734                 struct rb_node *root;
819                 struct rb_node **pptr;            735                 struct rb_node **pptr;
820                 u32 *pid;                         736                 u32 *pid;
821         } stk[TC_HTB_MAXDEPTH], *sp = stk;        737         } stk[TC_HTB_MAXDEPTH], *sp = stk;
822                                                   738 
823         BUG_ON(!hprio->row.rb_node);           !! 739         BUG_ON(!tree->rb_node);
824         sp->root = hprio->row.rb_node;         !! 740         sp->root = tree->rb_node;
825         sp->pptr = &hprio->ptr;                !! 741         sp->pptr = pptr;
826         sp->pid = &hprio->last_ptr_id;         !! 742         sp->pid = pid;
827                                                   743 
828         for (i = 0; i < 65535; i++) {             744         for (i = 0; i < 65535; i++) {
829                 if (!*sp->pptr && *sp->pid) {     745                 if (!*sp->pptr && *sp->pid) {
830                         /* ptr was invalidated but id is valid - try to recover      746                         /* ptr was invalidated but id is valid - try to recover
831                          * the original or next ptr    !! 747                            the original or next ptr */
832                          */                            << 
833                         *sp->pptr =               748                         *sp->pptr =
834                             htb_id_find_next_u    749                             htb_id_find_next_upper(prio, sp->root, *sp->pid);
835                 }                                 750                 }
836                 *sp->pid = 0;   /* ptr is valid now so that remove this hint as it   751                 *sp->pid = 0;   /* ptr is valid now so that remove this hint as it
837                                  * can become out of date quickly  !! 752                                    can become out of date quickly */
838                                  */                    << 
839                 if (!*sp->pptr) {       /* we     753                 if (!*sp->pptr) {       /* we are at right end; rewind & go up */
840                         *sp->pptr = sp->root;     754                         *sp->pptr = sp->root;
841                         while ((*sp->pptr)->rb    755                         while ((*sp->pptr)->rb_left)
842                                 *sp->pptr = (*    756                                 *sp->pptr = (*sp->pptr)->rb_left;
843                         if (sp > stk) {           757                         if (sp > stk) {
844                                 sp--;             758                                 sp--;
845                                 if (!*sp->pptr    759                                 if (!*sp->pptr) {
846                                         WARN_O    760                                         WARN_ON(1);
847                                         return    761                                         return NULL;
848                                 }                 762                                 }
849                                 htb_next_rb_no    763                                 htb_next_rb_node(sp->pptr);
850                         }                         764                         }
851                 } else {                          765                 } else {
852                         struct htb_class *cl;     766                         struct htb_class *cl;
853                         struct htb_prio *clp;          << 
854                                                        << 
855                         cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);    767                         cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
856                         if (!cl->level)                   768                         if (!cl->level)
857                                 return cl;                769                                 return cl;
858                         clp = &cl->inner.clprio[prio];  !! 770                         (++sp)->root = cl->un.inner.feed[prio].rb_node;
859                         (++sp)->root = clp->feed.rb_node;  !! 771                         sp->pptr = cl->un.inner.ptr + prio;
860                         sp->pptr = &clp->ptr;          !! 772                         sp->pid = cl->un.inner.last_ptr_id + prio;
861                         sp->pid = &clp->last_ptr_id;   << 
862                 }                                 773                 }
863         }                                         774         }
864         WARN_ON(1);                               775         WARN_ON(1);
865         return NULL;                              776         return NULL;
866 }                                                 777 }
867                                                   778 
868 /* dequeues packet at given priority and level; call only if      779 /* dequeues packet at given priority and level; call only if
869  * you are sure that there is active class at prio/level       !! 780    you are sure that there is active class at prio/level */
870  */                                            << 
871 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,  !! 781 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
872                                         const int level)  !! 782                                         int level)
873 {                                                 783 {
874         struct sk_buff *skb = NULL;               784         struct sk_buff *skb = NULL;
875         struct htb_class *cl, *start;             785         struct htb_class *cl, *start;
876         struct htb_level *hlevel = &q->hlevel[level];  << 
877         struct htb_prio *hprio = &hlevel->hprio[prio];  << 
878                                                << 
879         /* look initial class up in the row */    786         /* look initial class up in the row */
880         start = cl = htb_lookup_leaf(hprio, prio);     !! 787         start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                                   >> 788                                      q->ptr[level] + prio,
                                                   >> 789                                      q->last_ptr_id[level] + prio);
881                                                   790 
882         do {                                      791         do {
883 next:                                             792 next:
884                 if (unlikely(!cl))                793                 if (unlikely(!cl))
885                         return NULL;              794                         return NULL;
886                                                   795 
887                 /* class can be empty - it is unlikely but can be true if leaf     796                 /* class can be empty - it is unlikely but can be true if leaf
888                  * qdisc drops packets in enqueue routine or if someone used   !!  797                    qdisc drops packets in enqueue routine or if someone used
889                  * graft operation on the leaf since last dequeue;             !!  798                    graft operation on the leaf since last dequeue;
890                  * simply deactivate and skip such class                       !!  799                    simply deactivate and skip such class */
891                  */                                    << 
892                 if (unlikely(cl->leaf.q->q.qlen == 0)) {  !! 800                 if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
893                         struct htb_class *next    801                         struct htb_class *next;
894                         htb_deactivate(q, cl);    802                         htb_deactivate(q, cl);
895                                                   803 
896                         /* row/level might bec    804                         /* row/level might become empty */
897                         if ((q->row_mask[level    805                         if ((q->row_mask[level] & (1 << prio)) == 0)
898                                 return NULL;      806                                 return NULL;
899                                                   807 
900                         next = htb_lookup_leaf(hprio, prio);  !! 808                         next = htb_lookup_leaf(q->row[level] + prio,
                                                   >> 809                                                prio, q->ptr[level] + prio,
                                                   >> 810                                                q->last_ptr_id[level] + prio);
901                                                   811 
902                         if (cl == start)          812                         if (cl == start)        /* fix start if we just deleted it */
903                                 start = next;     813                                 start = next;
904                         cl = next;                814                         cl = next;
905                         goto next;                815                         goto next;
906                 }                                 816                 }
907                                                   817 
908                 skb = cl->leaf.q->dequeue(cl->leaf.q);  !! 818                 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
909                 if (likely(skb != NULL))          819                 if (likely(skb != NULL))
910                         break;                    820                         break;
911                                                   821 
912                 qdisc_warn_nonwc("htb", cl->leaf.q);   !! 822                 qdisc_warn_nonwc("htb", cl->un.leaf.q);
913                 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :  !! 823                 htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
914                                          &q->hlevel[0].hprio[prio].ptr);        !! 824                                   ptr[0]) + prio);
915                 cl = htb_lookup_leaf(hprio, prio);     !! 825                 cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                                   >> 826                                      q->ptr[level] + prio,
                                                   >> 827                                      q->last_ptr_id[level] + prio);
916                                                   828 
917         } while (cl != start);                    829         } while (cl != start);
918                                                   830 
919         if (likely(skb != NULL)) {                831         if (likely(skb != NULL)) {
920                 bstats_update(&cl->bstats, skb);       !! 832                 cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
921                 cl->leaf.deficit[level] -= qdisc_pkt_len(skb);  !! 833                 if (cl->un.leaf.deficit[level] < 0) {
922                 if (cl->leaf.deficit[level] < 0) {     !! 834                         cl->un.leaf.deficit[level] += cl->quantum;
923                         cl->leaf.deficit[level] += cl->quantum;  !! 835                         htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
924                         htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :  !! 836                                           ptr[0]) + prio);
925                                          &q->hlevel[0].hprio[prio].ptr);  << 
926                 }                                 837                 }
927                 /* this used to be after charge_class but this constellation       838                 /* this used to be after charge_class but this constellation
928                  * gives us slightly better performance  !! 839                    gives us slightly better performance */
929                  */                                    << 
930                 if (!cl->leaf.q->q.qlen)               !! 840                 if (!cl->un.leaf.q->q.qlen)
931                         htb_deactivate(q, cl);    841                         htb_deactivate(q, cl);
932                 htb_charge_class(q, cl, level,    842                 htb_charge_class(q, cl, level, skb);
933         }                                         843         }
934         return skb;                               844         return skb;
935 }                                                 845 }
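
The deficit arithmetic at the end of the function is plain deficit round robin: each dequeue eats the class's deficit, and when it goes negative the class is topped up by one quantum and the feed pointer advances to the next class. A standalone trace with illustrative sizes:

#include <stdio.h>

int main(void)
{
	int deficit = 0, quantum = 1500;
	int pkts[] = { 600, 600, 600, 600 };

	for (int i = 0; i < 4; i++) {
		deficit -= pkts[i];
		if (deficit < 0) {
			deficit += quantum;	/* refill and yield the turn */
			printf("pkt %d: advance to next class (deficit now %d)\n",
			       i, deficit);
		} else {
			printf("pkt %d: same class keeps the turn (deficit %d)\n",
			       i, deficit);
		}
	}
	return 0;
}
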
936                                                   846 
937 static struct sk_buff *htb_dequeue(struct Qdis    847 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
938 {                                                 848 {
939         struct sk_buff *skb;                   !! 849         struct sk_buff *skb = NULL;
940         struct htb_sched *q = qdisc_priv(sch);    850         struct htb_sched *q = qdisc_priv(sch);
941         int level;                                851         int level;
942         s64 next_event;                        !! 852         psched_time_t next_event;
943         unsigned long start_at;                   853         unsigned long start_at;
944                                                   854 
945         /* try to dequeue direct packets as hi    855         /* try to dequeue direct packets as high prio (!) to minimize cpu work */
946         skb = __qdisc_dequeue_head(&q->direct_queue);  !! 856         skb = __skb_dequeue(&q->direct_queue);
947         if (skb != NULL) {                        857         if (skb != NULL) {
948 ok:                                                    !! 858                 sch->flags &= ~TCQ_F_THROTTLED;
949                 qdisc_bstats_update(sch, skb);         << 
950                 qdisc_qstats_backlog_dec(sch, skb);    << 
951                 sch->q.qlen--;                    859                 sch->q.qlen--;
952                 return skb;                       860                 return skb;
953         }                                         861         }
954                                                   862 
955         if (!sch->q.qlen)                         863         if (!sch->q.qlen)
956                 goto fin;                         864                 goto fin;
957         q->now = ktime_get_ns();               !! 865         q->now = psched_get_time();
958         start_at = jiffies;                       866         start_at = jiffies;
959                                                   867 
960         next_event = q->now + 5LLU * NSEC_PER_SEC;     !! 868         next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC;
961                                                   869 
962         for (level = 0; level < TC_HTB_MAXDEPT    870         for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
963                 /* common case optimization -     871                 /* common case optimization - skip event handler quickly */
964                 int m;                            872                 int m;
965                 s64 event = q->near_ev_cache[level];   !! 873                 psched_time_t event;
966                                                   874 
967                 if (q->now >= event) {         !! 875                 if (q->now >= q->near_ev_cache[level]) {
968                         event = htb_do_events(    876                         event = htb_do_events(q, level, start_at);
969                         if (!event)               877                         if (!event)
970                                 event = q->now + NSEC_PER_SEC;  !! 878                                 event = q->now + PSCHED_TICKS_PER_SEC;
971                         q->near_ev_cache[level    879                         q->near_ev_cache[level] = event;
972                 }                              !! 880                 } else
                                                   >> 881                         event = q->near_ev_cache[level];
973                                                   882 
974                 if (next_event > event)           883                 if (next_event > event)
975                         next_event = event;       884                         next_event = event;
976                                                   885 
977                 m = ~q->row_mask[level];          886                 m = ~q->row_mask[level];
978                 while (m != (int)(-1)) {          887                 while (m != (int)(-1)) {
979                         int prio = ffz(m);        888                         int prio = ffz(m);
980                                                << 
981                         m |= 1 << prio;           889                         m |= 1 << prio;
982                         skb = htb_dequeue_tree    890                         skb = htb_dequeue_tree(q, prio, level);
983                         if (likely(skb != NULL))       !! 891                         if (likely(skb != NULL)) {
984                                 goto ok;               !! 892                                 sch->q.qlen--;
                                                   >> 893                                 sch->flags &= ~TCQ_F_THROTTLED;
                                                   >> 894                                 goto fin;
                                                   >> 895                         }
985                 }                                 896                 }
986         }                                         897         }
                                                   >> 898         sch->qstats.overlimits++;
987         if (likely(next_event > q->now))          899         if (likely(next_event > q->now))
988                 qdisc_watchdog_schedule_ns(&q->watchdog, next_event);  !! 900                 qdisc_watchdog_schedule(&q->watchdog, next_event);
989         else                                      901         else
990                 schedule_work(&q->work);          902                 schedule_work(&q->work);
991 fin:                                              903 fin:
992         return skb;                               904         return skb;
993 }                                                 905 }
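
The priority scan above inverts row_mask and repeatedly takes the lowest zero bit (ffz) until the mask is all ones, so lower-numbered prios are always served first. A sketch with a portable ffz stand-in (the kernel's ffz() is architecture-optimized):

#include <stdio.h>

static int ffz32(unsigned int m)
{
	int bit = 0;

	while (m & 1) {		/* skip over one-bits */
		m >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned int row_mask = 0x15;	/* prios 0, 2 and 4 have active classes */
	int m = ~row_mask;

	while (m != (int)(-1)) {
		int prio = ffz32(m);

		m |= 1 << prio;		/* mark this prio as handled */
		printf("dequeue prio %d\n", prio);
	}
	return 0;
}
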
994                                                   906 
                                                   >> 907 /* try to drop from each class (by prio) until one succeed */
                                                   >> 908 static unsigned int htb_drop(struct Qdisc *sch)
                                                   >> 909 {
                                                   >> 910         struct htb_sched *q = qdisc_priv(sch);
                                                   >> 911         int prio;
                                                   >> 912 
                                                   >> 913         for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                                                   >> 914                 struct list_head *p;
                                                   >> 915                 list_for_each(p, q->drops + prio) {
                                                   >> 916                         struct htb_class *cl = list_entry(p, struct htb_class,
                                                   >> 917                                                           un.leaf.drop_list);
                                                   >> 918                         unsigned int len;
                                                   >> 919                         if (cl->un.leaf.q->ops->drop &&
                                                   >> 920                             (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                                   >> 921                                 sch->q.qlen--;
                                                   >> 922                                 if (!cl->un.leaf.q->q.qlen)
                                                   >> 923                                         htb_deactivate(q, cl);
                                                   >> 924                                 return len;
                                                   >> 925                         }
                                                   >> 926                 }
                                                   >> 927         }
                                                   >> 928         return 0;
                                                   >> 929 }
                                                   >> 930 
995 /* reset all classes */                           931 /* reset all classes */
996 /* always called under BH & queue lock */         932 /* always called under BH & queue lock */
997 static void htb_reset(struct Qdisc *sch)          933 static void htb_reset(struct Qdisc *sch)
998 {                                                 934 {
999         struct htb_sched *q = qdisc_priv(sch);    935         struct htb_sched *q = qdisc_priv(sch);
1000         struct htb_class *cl;                    936         struct htb_class *cl;
                                                   >> 937         struct hlist_node *n;
1001         unsigned int i;                          938         unsigned int i;
1002                                                  939 
1003         for (i = 0; i < q->clhash.hashsize; i    940         for (i = 0; i < q->clhash.hashsize; i++) {
1004                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {  !! 941                 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
1005                         if (cl->level)           942                         if (cl->level)
1006                                 memset(&cl->inner, 0, sizeof(cl->inner));  !! 943                                 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
1007                         else {                   944                         else {
1008                                 if (cl->leaf.q && !q->offload)  !! 945                                 if (cl->un.leaf.q)
1009                                         qdisc_reset(cl->leaf.q);  !! 946                                         qdisc_reset(cl->un.leaf.q);
                                                   >> 947                                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1010                         }                        948                         }
1011                         cl->prio_activity = 0    949                         cl->prio_activity = 0;
1012                         cl->cmode = HTB_CAN_S    950                         cl->cmode = HTB_CAN_SEND;
                                                   >> 951 
1013                 }                                952                 }
1014         }                                        953         }
1015         qdisc_watchdog_cancel(&q->watchdog);     954         qdisc_watchdog_cancel(&q->watchdog);
1016         __qdisc_reset_queue(&q->direct_queue);        !! 955         __skb_queue_purge(&q->direct_queue);
1017         memset(q->hlevel, 0, sizeof(q->hlevel));      !! 956         sch->q.qlen = 0;
                                                   >> 957         memset(q->row, 0, sizeof(q->row));
1018         memset(q->row_mask, 0, sizeof(q->row_    958         memset(q->row_mask, 0, sizeof(q->row_mask));
                                                   >> 959         memset(q->wait_pq, 0, sizeof(q->wait_pq));
                                                   >> 960         memset(q->ptr, 0, sizeof(q->ptr));
                                                   >> 961         for (i = 0; i < TC_HTB_NUMPRIO; i++)
                                                   >> 962                 INIT_LIST_HEAD(q->drops + i);
1019 }                                                963 }
1020                                                  964 
1021 static const struct nla_policy htb_policy[TCA    965 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1022         [TCA_HTB_PARMS] = { .len = sizeof(str    966         [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
1023         [TCA_HTB_INIT]  = { .len = sizeof(str    967         [TCA_HTB_INIT]  = { .len = sizeof(struct tc_htb_glob) },
1024         [TCA_HTB_CTAB]  = { .type = NLA_BINAR    968         [TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1025         [TCA_HTB_RTAB]  = { .type = NLA_BINAR    969         [TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1026         [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },  << 
1027         [TCA_HTB_RATE64] = { .type = NLA_U64 },       << 
1028         [TCA_HTB_CEIL64] = { .type = NLA_U64 },       << 
1029         [TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },     << 
1030 };                                               970 };
1031                                                  971 
1032 static void htb_work_func(struct work_struct     972 static void htb_work_func(struct work_struct *work)
1033 {                                                973 {
1034         struct htb_sched *q = container_of(wo    974         struct htb_sched *q = container_of(work, struct htb_sched, work);
1035         struct Qdisc *sch = q->watchdog.qdisc    975         struct Qdisc *sch = q->watchdog.qdisc;
1036                                                  976 
1037         rcu_read_lock();                      << 
1038         __netif_schedule(qdisc_root(sch));       977         __netif_schedule(qdisc_root(sch));
1039         rcu_read_unlock();                    << 
1040 }                                                978 }
1041                                                  979 
1042 static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)  !! 980 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1043 {                                                 981 {
1044         return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);  << 
1045 }                                              << 
1046                                                << 
1047 static int htb_init(struct Qdisc *sch, struct nlattr *opt,  << 
1048                     struct netlink_ext_ack *extack)  << 
1049 {                                             << 
1050         struct net_device *dev = qdisc_dev(sch);      << 
1051         struct tc_htb_qopt_offload offload_opt;       << 
1052         struct htb_sched *q = qdisc_priv(sch)    982         struct htb_sched *q = qdisc_priv(sch);
1053         struct nlattr *tb[TCA_HTB_MAX + 1];   !! 983         struct nlattr *tb[TCA_HTB_INIT + 1];
1054         struct tc_htb_glob *gopt;                984         struct tc_htb_glob *gopt;
1055         unsigned int ntx;                     << 
1056         bool offload;                         << 
1057         int err;                                 985         int err;
1058                                               !! 986         int i;
1059         qdisc_watchdog_init(&q->watchdog, sch);       << 
1060         INIT_WORK(&q->work, htb_work_func);   << 
1061                                                  987 
1062         if (!opt)                                988         if (!opt)
1063                 return -EINVAL;                  989                 return -EINVAL;
1064                                                  990 
1065         err = tcf_block_get(&q->block, &q->filter_list, sch, extack);  !! 991         err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
1066         if (err)                              << 
1067                 return err;                   << 
1068                                               << 
1069         err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,  << 
1070                                           NULL);      << 
1071         if (err < 0)                             992         if (err < 0)
1072                 return err;                      993                 return err;
1073                                                  994 
1074         if (!tb[TCA_HTB_INIT])                !! 995         if (tb[TCA_HTB_INIT] == NULL) {
                                                   >> 996                 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1075                 return -EINVAL;                  997                 return -EINVAL;
1076                                               !! 998         }
1077         gopt = nla_data(tb[TCA_HTB_INIT]);       999         gopt = nla_data(tb[TCA_HTB_INIT]);
1078         if (gopt->version != HTB_VER >> 16)   !! 1000         if (gopt->version != HTB_VER >> 16) {
                                                   >> 1001                 printk(KERN_ERR
                                                   >> 1002                        "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                                   >> 1003                        HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
1079                 return -EINVAL;                  1004                 return -EINVAL;
1080                                               << 
1081         offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);  << 
1082                                               << 
1083         if (offload) {                        << 
1084                 if (sch->parent != TC_H_ROOT) {       << 
1085                         NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");  << 
1086                         return -EOPNOTSUPP;           << 
1087                 }                                     << 
1088                                                       << 
1089                 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {  << 
1090                         NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");  << 
1091                         return -EOPNOTSUPP;           << 
1092                 }                                     << 
1093                                                       << 
1094                 q->num_direct_qdiscs = dev->real_num_tx_queues;  << 
1095                 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,  << 
1096                                            sizeof(*q->direct_qdiscs),  << 
1097                                            GFP_KERNEL);  << 
1098                 if (!q->direct_qdiscs)                << 
1099                         return -ENOMEM;               << 
1100         }                                        1005         }
1101                                                  1006 
1102         err = qdisc_class_hash_init(&q->clhas    1007         err = qdisc_class_hash_init(&q->clhash);
1103         if (err < 0)                             1008         if (err < 0)
1104                 return err;                      1009                 return err;
                                                   >> 1010         for (i = 0; i < TC_HTB_NUMPRIO; i++)
                                                   >> 1011                 INIT_LIST_HEAD(q->drops + i);
1105                                                  1012 
1106         if (tb[TCA_HTB_DIRECT_QLEN])          !! 1013         qdisc_watchdog_init(&q->watchdog, sch);
1107                 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);  !! 1014         INIT_WORK(&q->work, htb_work_func);
1108         else                                           !! 1015         skb_queue_head_init(&q->direct_queue);
1109                 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;  !! 1016 
                                                   >> 1017         q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
                                                   >> 1018         if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                                                   >> 1019                 q->direct_qlen = 2;
1110                                                  1020 
1111         if ((q->rate2quantum = gopt->rate2qua    1021         if ((q->rate2quantum = gopt->rate2quantum) < 1)
1112                 q->rate2quantum = 1;             1022                 q->rate2quantum = 1;
1113         q->defcls = gopt->defcls;                1023         q->defcls = gopt->defcls;
1114                                                  1024 
1115         if (!offload)                                  << 
1116                 return 0;                              << 
1117                                                        << 
1118         for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {  << 
1119                 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);  << 
1120                 struct Qdisc *qdisc;                   << 
1121                                                        << 
1122                 qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,  << 
1123                                           TC_H_MAKE(sch->handle, 0), extack);  << 
1124                 if (!qdisc) {                          << 
1125                         return -ENOMEM;                << 
1126                 }                                      << 
1127                                                        << 
1128                 q->direct_qdiscs[ntx] = qdisc;         << 
1129                 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;  << 
1130         }                                              << 
1131                                                        << 
1132         sch->flags |= TCQ_F_MQROOT;                    << 
1133                                                        << 
1134         offload_opt = (struct tc_htb_qopt_offload) {   << 
1135                 .command = TC_HTB_CREATE,              << 
1136                 .parent_classid = TC_H_MAJ(sch->handle) >> 16,  << 
1137                 .classid = TC_H_MIN(q->defcls),        << 
1138                 .extack = extack,                      << 
1139         };                                             << 
1140         err = htb_offload(dev, &offload_opt);          << 
1141         if (err)                                       << 
1142                 return err;                            << 
1143                                                        << 
1144         /* Defer this assignment, so that htb_destroy skips offload-related  << 
1145          * parts (especially calling ndo_setup_tc) on errors.  << 
1146          */                                            << 
1147         q->offload = true;                             << 
1148                                                        << 
1149         return 0;                                1025         return 0;
1150 }                                                1026 }
1151                                                  1027 
1152 static void htb_attach_offload(struct Qdisc *sch)  << 
1153 {                                                  << 
1154         struct net_device *dev = qdisc_dev(sch);   << 
1155         struct htb_sched *q = qdisc_priv(sch);     << 
1156         unsigned int ntx;                          << 
1157                                                    << 
1158         for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {  << 
1159                 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];  << 
1160                                                    << 
1161                 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);  << 
1162                 qdisc_put(old);                    << 
1163                 qdisc_hash_add(qdisc, false);      << 
1164         }                                          << 
1165         for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {  << 
1166                 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);  << 
1167                 struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);  << 
1168                                                    << 
1169                 qdisc_put(old);                    << 
1170         }                                          << 
1171                                                    << 
1172         kfree(q->direct_qdiscs);                   << 
1173         q->direct_qdiscs = NULL;                   << 
1174 }                                                  << 
1175                                                    << 
1176 static void htb_attach_software(struct Qdisc *sch)  << 
1177 {                                                  << 
1178         struct net_device *dev = qdisc_dev(sch);   << 
1179         unsigned int ntx;                          << 
1180                                                    << 
1181         /* Resemble qdisc_graft behavior. */       << 
1182         for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  << 
1183                 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);  << 
1184                 struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);  << 
1185                                                    << 
1186                 qdisc_refcount_inc(sch);           << 
1187                                                    << 
1188                 qdisc_put(old);                    << 
1189         }                                          << 
1190 }                                                  << 
1191                                                    << 
1192 static void htb_attach(struct Qdisc *sch)          << 
1193 {                                                  << 
1194         struct htb_sched *q = qdisc_priv(sch);     << 
1195                                                    << 
1196         if (q->offload)                            << 
1197                 htb_attach_offload(sch);           << 
1198         else                                       << 
1199                 htb_attach_software(sch);          << 
1200 }                                                  << 
1201                                               << 
1202 static int htb_dump(struct Qdisc *sch, struct    1028 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1203 {                                                1029 {
                                                   >> 1030         spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1204         struct htb_sched *q = qdisc_priv(sch)    1031         struct htb_sched *q = qdisc_priv(sch);
1205         struct nlattr *nest;                     1032         struct nlattr *nest;
1206         struct tc_htb_glob gopt;                 1033         struct tc_htb_glob gopt;
1207                                                  1034 
1208         if (q->offload)                       !! 1035         spin_lock_bh(root_lock);
1209                 sch->flags |= TCQ_F_OFFLOADED; << 
1210         else                                  << 
1211                 sch->flags &= ~TCQ_F_OFFLOADED; << 
1212                                               << 
1213         sch->qstats.overlimits = q->overlimits; << 
1214         /* It's safe to not acquire qdisc lock. As we hold RTNL, << 
1215          * no change can happen on the qdisc parameters. << 
1216          */                                   << 
1217                                                  1036 
1218         gopt.direct_pkts = q->direct_pkts;       1037         gopt.direct_pkts = q->direct_pkts;
1219         gopt.version = HTB_VER;                  1038         gopt.version = HTB_VER;
1220         gopt.rate2quantum = q->rate2quantum;     1039         gopt.rate2quantum = q->rate2quantum;
1221         gopt.defcls = q->defcls;                 1040         gopt.defcls = q->defcls;
1222         gopt.debug = 0;                          1041         gopt.debug = 0;
1223                                                  1042 
1224         nest = nla_nest_start_noflag(skb, TCA_OPTIONS); !! 1043         nest = nla_nest_start(skb, TCA_OPTIONS);
1225         if (nest == NULL)                        1044         if (nest == NULL)
1226                 goto nla_put_failure;            1045                 goto nla_put_failure;
1227         if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) || !! 1046         NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1228             nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) !! 1047         nla_nest_end(skb, nest);
1229                 goto nla_put_failure;         << 
1230         if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) << 
1231                 goto nla_put_failure;         << 
1232                                                  1048 
1233         return nla_nest_end(skb, nest);       !! 1049         spin_unlock_bh(root_lock);
                                                   >> 1050         return skb->len;
1234                                                  1051 
1235 nla_put_failure:                                 1052 nla_put_failure:
                                                   >> 1053         spin_unlock_bh(root_lock);
1236         nla_nest_cancel(skb, nest);              1054         nla_nest_cancel(skb, nest);
1237         return -1;                               1055         return -1;
1238 }                                                1056 }
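
htb_dump follows the standard qdisc dump pattern: open a TCA_OPTIONS nest, emit attributes, then either close the nest (nla_nest_end returns skb->len on success) or rewind it on overflow. A minimal sketch of the same pattern, assuming only the real <net/netlink.h> helpers already used above:

	static int dump_opts_sketch(struct sk_buff *skb, struct tc_htb_glob *gopt)
	{
		struct nlattr *nest;

		nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
		if (!nest)
			goto nla_put_failure;
		if (nla_put(skb, TCA_HTB_INIT, sizeof(*gopt), gopt))
			goto nla_put_failure;
		return nla_nest_end(skb, nest);	/* success: returns skb->len */

	nla_put_failure:
		nla_nest_cancel(skb, nest);	/* trims partially written TLVs */
		return -1;
	}
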
1239                                                  1057 
1240 static int htb_dump_class(struct Qdisc *sch,     1058 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1241                           struct sk_buff *skb    1059                           struct sk_buff *skb, struct tcmsg *tcm)
1242 {                                                1060 {
1243         struct htb_class *cl = (struct htb_cl    1061         struct htb_class *cl = (struct htb_class *)arg;
1244         struct htb_sched *q = qdisc_priv(sch); !! 1062         spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1245         struct nlattr *nest;                     1063         struct nlattr *nest;
1246         struct tc_htb_opt opt;                   1064         struct tc_htb_opt opt;
1247                                                  1065 
1248         /* It's safe to not acquire qdisc lock. As we hold RTNL, !! 1066         spin_lock_bh(root_lock);
1249          * no change can happen on the class parameters. << 
1250          */                                   << 
1251         tcm->tcm_parent = cl->parent ? cl->pa    1067         tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1252         tcm->tcm_handle = cl->common.classid;    1068         tcm->tcm_handle = cl->common.classid;
1253         if (!cl->level && cl->leaf.q)         !! 1069         if (!cl->level && cl->un.leaf.q)
1254                 tcm->tcm_info = cl->leaf.q->handle; !! 1070                 tcm->tcm_info = cl->un.leaf.q->handle;
1255                                                  1071 
1256         nest = nla_nest_start_noflag(skb, TCA_OPTIONS); !! 1072         nest = nla_nest_start(skb, TCA_OPTIONS);
1257         if (nest == NULL)                        1073         if (nest == NULL)
1258                 goto nla_put_failure;            1074                 goto nla_put_failure;
1259                                                  1075 
1260         memset(&opt, 0, sizeof(opt));            1076         memset(&opt, 0, sizeof(opt));
1261                                                  1077 
1262         psched_ratecfg_getrate(&opt.rate, &cl->rate); !! 1078         opt.rate = cl->rate->rate;
1263         opt.buffer = PSCHED_NS2TICKS(cl->buffer); !! 1079         opt.buffer = cl->buffer;
1264         psched_ratecfg_getrate(&opt.ceil, &cl->ceil); !! 1080         opt.ceil = cl->ceil->rate;
1265         opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); !! 1081         opt.cbuffer = cl->cbuffer;
1266         opt.quantum = cl->quantum;               1082         opt.quantum = cl->quantum;
1267         opt.prio = cl->prio;                     1083         opt.prio = cl->prio;
1268         opt.level = cl->level;                   1084         opt.level = cl->level;
1269         if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) !! 1085         NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1270                 goto nla_put_failure;         << 
1271         if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) << 
1272                 goto nla_put_failure;         << 
1273         if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && << 
1274             nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, << 
1275                               TCA_HTB_PAD))   << 
1276                 goto nla_put_failure;         << 
1277         if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && << 
1278             nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, << 
1279                               TCA_HTB_PAD))   << 
1280                 goto nla_put_failure;         << 
1281                                                  1086 
1282         return nla_nest_end(skb, nest);       !! 1087         nla_nest_end(skb, nest);
                                                   >> 1088         spin_unlock_bh(root_lock);
                                                   >> 1089         return skb->len;
1283                                                  1090 
1284 nla_put_failure:                                 1091 nla_put_failure:
                                                   >> 1092         spin_unlock_bh(root_lock);
1285         nla_nest_cancel(skb, nest);              1093         nla_nest_cancel(skb, nest);
1286         return -1;                               1094         return -1;
1287 }                                                1095 }
1288                                                  1096 
1289 static void htb_offload_aggregate_stats(struct htb_sched *q, << 
1290                                         struct htb_class *cl) << 
1291 {                                             << 
1292         u64 bytes = 0, packets = 0;           << 
1293         struct htb_class *c;                  << 
1294         unsigned int i;                       << 
1295                                               << 
1296         gnet_stats_basic_sync_init(&cl->bstats); << 
1297                                               << 
1298         for (i = 0; i < q->clhash.hashsize; i++) { << 
1299                 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { << 
1300                         struct htb_class *p = c; << 
1301                                               << 
1302                         while (p && p->level < cl->level) << 
1303                                 p = p->parent; << 
1304                                               << 
1305                         if (p != cl)          << 
1306                                 continue;     << 
1307                                               << 
1308                         bytes += u64_stats_read(&c->bstats_bias.bytes); << 
1309                         packets += u64_stats_read(&c->bstats_bias.packets); << 
1310                         if (c->level == 0) {  << 
1311                                 bytes += u64_stats_read(&c->leaf.q->bstats.bytes); << 
1312                                 packets += u64_stats_read(&c->leaf.q->bstats.packets); << 
1313                         }                     << 
1314                 }                             << 
1315         }                                     << 
1316         _bstats_update(&cl->bstats, bytes, packets); << 
1317 }                                             << 
1318                                               << 
1319 static int                                       1097 static int
1320 htb_dump_class_stats(struct Qdisc *sch, unsig    1098 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1321 {                                                1099 {
1322         struct htb_class *cl = (struct htb_cl    1100         struct htb_class *cl = (struct htb_class *)arg;
1323         struct htb_sched *q = qdisc_priv(sch); << 
1324         struct gnet_stats_queue qs = {        << 
1325                 .drops = cl->drops,           << 
1326                 .overlimits = cl->overlimits, << 
1327         };                                    << 
1328         __u32 qlen = 0;                       << 
1329                                               << 
1330         if (!cl->level && cl->leaf.q)         << 
1331                 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); << 
1332                                               << 
1333         cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), << 
1334                                     INT_MIN, INT_MAX); << 
1335         cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), << 
1336                                      INT_MIN, INT_MAX); << 
1337                                               << 
1338         if (q->offload) {                     << 
1339                 if (!cl->level) {             << 
1340                         if (cl->leaf.q)       << 
1341                                 cl->bstats = cl->leaf.q->bstats; << 
1342                         else                  << 
1343                                 gnet_stats_basic_sync_init(&cl->bstats); << 
1344                         _bstats_update(&cl->bstats, << 
1345                                        u64_stats_read(&cl->bstats_bias.bytes), << 
1346                                        u64_stats_read(&cl->bstats_bias.packets)); << 
1347                 } else {                      << 
1348                         htb_offload_aggregate_stats(q, cl); << 
1349                 }                             << 
1350         }                                     << 
1351                                                  1101 
1352         if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || !! 1102         if (!cl->level && cl->un.leaf.q)
                                                   >> 1103                 cl->qstats.qlen = cl->un.leaf.q->q.qlen;
                                                   >> 1104         cl->xstats.tokens = cl->tokens;
                                                   >> 1105         cl->xstats.ctokens = cl->ctokens;
                                                   >> 1106 
                                                   >> 1107         if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1353             gnet_stats_copy_rate_est(d, &cl->    1108             gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1354             gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) !! 1109             gnet_stats_copy_queue(d, &cl->qstats) < 0)
1355                 return -1;                       1110                 return -1;
1356                                                  1111 
1357         return gnet_stats_copy_app(d, &cl->xs    1112         return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1358 }                                                1113 }
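
Internally cl->tokens is a signed 64-bit nanosecond count, but tc's tc_htb_xstats fields are 32-bit, which is why the conversion above clamps after translating to psched ticks. Illustrative values (hypothetical, just to show why the clamp matters for an idle class with a large mbuffer):

	s64 tokens_ns = 90LL * NSEC_PER_SEC;	/* large accumulated credit */
	s64 ticks = PSCHED_NS2TICKS(tokens_ns);	/* may exceed INT_MAX */
	int exported = clamp_t(s64, ticks, INT_MIN, INT_MAX); /* saturates */
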
1359                                                  1114 
1360 static struct netdev_queue *                  << 
1361 htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm) << 
1362 {                                             << 
1363         struct net_device *dev = qdisc_dev(sch); << 
1364         struct tc_htb_qopt_offload offload_opt; << 
1365         struct htb_sched *q = qdisc_priv(sch); << 
1366         int err;                              << 
1367                                               << 
1368         if (!q->offload)                      << 
1369                 return sch->dev_queue;        << 
1370                                               << 
1371         offload_opt = (struct tc_htb_qopt_offload) { << 
1372                 .command = TC_HTB_LEAF_QUERY_QUEUE, << 
1373                 .classid = TC_H_MIN(tcm->tcm_parent), << 
1374         };                                    << 
1375         err = htb_offload(dev, &offload_opt); << 
1376         if (err || offload_opt.qid >= dev->num_tx_queues) << 
1377                 return NULL;                  << 
1378         return netdev_get_tx_queue(dev, offload_opt.qid); << 
1379 }                                             << 
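
htb_select_queue asks the driver, via ndo_setup_tc, which tx queue backs a given leaf classid. A hypothetical driver-side counterpart is sketched below; the foo_* names are assumptions, while tc_htb_qopt_offload and its command/classid/qid fields are the real offload interface:

	static int foo_setup_tc_htb(struct foo_priv *priv,
				    struct tc_htb_qopt_offload *opt)
	{
		switch (opt->command) {
		case TC_HTB_LEAF_QUERY_QUEUE:
			/* map the leaf classid to the queue the driver allocated */
			opt->qid = foo_qid_for_classid(priv, opt->classid);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}
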
1380                                               << 
1381 static struct Qdisc *                         << 
1382 htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q) << 
1383 {                                             << 
1384         struct net_device *dev = dev_queue->dev; << 
1385         struct Qdisc *old_q;                  << 
1386                                               << 
1387         if (dev->flags & IFF_UP)              << 
1388                 dev_deactivate(dev);          << 
1389         old_q = dev_graft_qdisc(dev_queue, new_q); << 
1390         if (new_q)                            << 
1391                 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; << 
1392         if (dev->flags & IFF_UP)              << 
1393                 dev_activate(dev);            << 
1394                                               << 
1395         return old_q;                         << 
1396 }                                             << 
1397                                               << 
1398 static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl) << 
1399 {                                             << 
1400         struct netdev_queue *queue;           << 
1401                                               << 
1402         queue = cl->leaf.offload_queue;       << 
1403         if (!(cl->leaf.q->flags & TCQ_F_BUILTIN)) << 
1404                 WARN_ON(cl->leaf.q->dev_queue != queue); << 
1405                                               << 
1406         return queue;                         << 
1407 }                                             << 
1408                                               << 
1409 static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old, << 
1410                                    struct htb_class *cl_new, bool destroying) << 
1411 {                                             << 
1412         struct netdev_queue *queue_old, *queue_new; << 
1413         struct net_device *dev = qdisc_dev(sch); << 
1414                                               << 
1415         queue_old = htb_offload_get_queue(cl_old); << 
1416         queue_new = htb_offload_get_queue(cl_new); << 
1417                                               << 
1418         if (!destroying) {                    << 
1419                 struct Qdisc *qdisc;          << 
1420                                               << 
1421                 if (dev->flags & IFF_UP)      << 
1422                         dev_deactivate(dev);  << 
1423                 qdisc = dev_graft_qdisc(queue_old, NULL); << 
1424                 WARN_ON(qdisc != cl_old->leaf.q); << 
1425         }                                     << 
1426                                               << 
1427         if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN)) << 
1428                 cl_old->leaf.q->dev_queue = queue_new; << 
1429         cl_old->leaf.offload_queue = queue_new; << 
1430                                               << 
1431         if (!destroying) {                    << 
1432                 struct Qdisc *qdisc;          << 
1433                                               << 
1434                 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q); << 
1435                 if (dev->flags & IFF_UP)      << 
1436                         dev_activate(dev);    << 
1437                 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN)); << 
1438         }                                     << 
1439 }                                             << 
1440                                               << 
1441 static int htb_graft(struct Qdisc *sch, unsig    1115 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1442                      struct Qdisc **old, struct netlink_ext_ack *extack) !! 1116                      struct Qdisc **old)
1443 {                                                1117 {
1444         struct netdev_queue *dev_queue = sch->dev_queue; << 
1445         struct htb_class *cl = (struct htb_class *)arg;    1118         struct htb_class *cl = (struct htb_class *)arg;
1446         struct htb_sched *q = qdisc_priv(sch); << 
1447         struct Qdisc *old_q;                  << 
1448                                                  1119 
1449         if (cl->level)                           1120         if (cl->level)
1450                 return -EINVAL;                  1121                 return -EINVAL;
                                                   >> 1122         if (new == NULL &&
                                                   >> 1123             (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                                   >> 1124                                      &pfifo_qdisc_ops,
                                                   >> 1125                                      cl->common.classid)) == NULL)
                                                   >> 1126                 return -ENOBUFS;
1451                                                  1127 
1452         if (q->offload)                       !! 1128         sch_tree_lock(sch);
1453                 dev_queue = htb_offload_get_queue(cl); !! 1129         *old = cl->un.leaf.q;
1454                                               !! 1130         cl->un.leaf.q = new;
1455         if (!new) {                           !! 1131         if (*old != NULL) {
1456                 new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, !! 1132                 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1457                                         cl->common.classid, extack); !! 1133                 qdisc_reset(*old);
1458                 if (!new)                     << 
1459                         return -ENOBUFS;      << 
1460         }                                     << 
1461                                               << 
1462         if (q->offload) {                     << 
1463                 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ << 
1464                 qdisc_refcount_inc(new);      << 
1465                 old_q = htb_graft_helper(dev_queue, new); << 
1466         }                                     << 
1467                                               << 
1468         *old = qdisc_replace(sch, new, &cl->leaf.q); << 
1469                                               << 
1470         if (q->offload) {                     << 
1471                 WARN_ON(old_q != *old);       << 
1472                 qdisc_put(old_q);             << 
1473         }                                        1134         }
1474                                               !! 1135         sch_tree_unlock(sch);
1475         return 0;                                1136         return 0;
1476 }                                                1137 }
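
In offload mode the replacement child ends up reachable from two places, cl->leaf.q and dev_queue->qdisc, so htb_graft takes a second reference before grafting; qdisc_replace then hands back the old child, whose queue-side reference is dropped with qdisc_put. Schematically (a restatement of the ownership flow above, not additional kernel code):

	qdisc_refcount_inc(new);		      /* ref #2: dev_queue->qdisc   */
	old_q = htb_graft_helper(dev_queue, new);     /* queue now points at new   */
	*old = qdisc_replace(sch, new, &cl->leaf.q);  /* ref #1 moves into the class */
	qdisc_put(old_q);			      /* drop the queue's ref on old */
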
1477                                                  1138 
1478 static struct Qdisc *htb_leaf(struct Qdisc *s    1139 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1479 {                                                1140 {
1480         struct htb_class *cl = (struct htb_cl    1141         struct htb_class *cl = (struct htb_class *)arg;
1481         return !cl->level ? cl->leaf.q : NULL; !! 1142         return !cl->level ? cl->un.leaf.q : NULL;
1482 }                                                1143 }
1483                                                  1144 
1484 static void htb_qlen_notify(struct Qdisc *sch    1145 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1485 {                                                1146 {
1486         struct htb_class *cl = (struct htb_cl    1147         struct htb_class *cl = (struct htb_class *)arg;
1487                                                  1148 
1488         htb_deactivate(qdisc_priv(sch), cl);  !! 1149         if (cl->un.leaf.q->q.qlen == 0)
                                                   >> 1150                 htb_deactivate(qdisc_priv(sch), cl);
                                                   >> 1151 }
                                                   >> 1152 
                                                   >> 1153 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
                                                   >> 1154 {
                                                   >> 1155         struct htb_class *cl = htb_find(classid, sch);
                                                   >> 1156         if (cl)
                                                   >> 1157                 cl->refcnt++;
                                                   >> 1158         return (unsigned long)cl;
1489 }                                                1159 }
1490                                                  1160 
1491 static inline int htb_parent_last_child(struc    1161 static inline int htb_parent_last_child(struct htb_class *cl)
1492 {                                                1162 {
1493         if (!cl->parent)                         1163         if (!cl->parent)
1494                 /* the root class */             1164                 /* the root class */
1495                 return 0;                        1165                 return 0;
1496         if (cl->parent->children > 1)            1166         if (cl->parent->children > 1)
1497                 /* not the last child */         1167                 /* not the last child */
1498                 return 0;                        1168                 return 0;
1499         return 1;                                1169         return 1;
1500 }                                                1170 }
1501                                                  1171 
1502 static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl, !! 1172 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1503                                struct Qdisc *new_q)    1173                                struct Qdisc *new_q)
1504 {                                                1174 {
1505         struct htb_sched *q = qdisc_priv(sch); << 
1506         struct htb_class *parent = cl->parent;    1175         struct htb_class *parent = cl->parent;
1507                                                  1176 
1508         WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); !! 1177         WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1509                                                  1178 
1510         if (parent->cmode != HTB_CAN_SEND)       1179         if (parent->cmode != HTB_CAN_SEND)
1511                 htb_safe_rb_erase(&parent->pq_node, !! 1180                 htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
1512                                   &q->hlevel[parent->level].wait_pq); << 
1513                                                  1181 
1514         parent->level = 0;                       1182         parent->level = 0;
1515         memset(&parent->inner, 0, sizeof(parent->inner)); !! 1183         memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1516         parent->leaf.q = new_q ? new_q : &noop_qdisc; !! 1184         INIT_LIST_HEAD(&parent->un.leaf.drop_list);
                                                   >> 1185         parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1517         parent->tokens = parent->buffer;         1186         parent->tokens = parent->buffer;
1518         parent->ctokens = parent->cbuffer;       1187         parent->ctokens = parent->cbuffer;
1519         parent->t_c = ktime_get_ns();         !! 1188         parent->t_c = psched_get_time();
1520         parent->cmode = HTB_CAN_SEND;            1189         parent->cmode = HTB_CAN_SEND;
1521         if (q->offload)                       << 
1522                 parent->leaf.offload_queue = cl->leaf.offload_queue; << 
1523 }                                             << 
1524                                               << 
1525 static void htb_parent_to_leaf_offload(struct Qdisc *sch, << 
1526                                        struct netdev_queue *dev_queue, << 
1527                                        struct Qdisc *new_q) << 
1528 {                                             << 
1529         struct Qdisc *old_q;                  << 
1530                                               << 
1531         /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ << 
1532         if (new_q)                            << 
1533                 qdisc_refcount_inc(new_q);    << 
1534         old_q = htb_graft_helper(dev_queue, new_q); << 
1535         WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); << 
1536 }                                             << 
1537                                               << 
1538 static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl, << 
1539                                      bool last_child, bool destroying, << 
1540                                      struct netlink_ext_ack *extack) << 
1541 {                                             << 
1542         struct tc_htb_qopt_offload offload_opt; << 
1543         struct netdev_queue *dev_queue;       << 
1544         struct Qdisc *q = cl->leaf.q;         << 
1545         struct Qdisc *old;                    << 
1546         int err;                              << 
1547                                               << 
1548         if (cl->level)                        << 
1549                 return -EINVAL;               << 
1550                                               << 
1551         WARN_ON(!q);                          << 
1552         dev_queue = htb_offload_get_queue(cl); << 
1553         /* When destroying, caller qdisc_graft grafts the new qdisc and invokes << 
1554          * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload << 
1555          * does not need to graft or qdisc_put the qdisc being destroyed. << 
1556          */                                   << 
1557         if (!destroying) {                    << 
1558                 old = htb_graft_helper(dev_queue, NULL); << 
1559                 /* Last qdisc grafted should be the same as cl->leaf.q when << 
1560                  * calling htb_delete.        << 
1561                  */                           << 
1562                 WARN_ON(old != q);            << 
1563         }                                     << 
1564                                               << 
1565         if (cl->parent) {                     << 
1566                 _bstats_update(&cl->parent->bstats_bias, << 
1567                                u64_stats_read(&q->bstats.bytes), << 
1568                                u64_stats_read(&q->bstats.packets)); << 
1569         }                                     << 
1570                                               << 
1571         offload_opt = (struct tc_htb_qopt_offload) { << 
1572                 .command = !last_child ? TC_HTB_LEAF_DEL : << 
1573                            destroying ? TC_HTB_LEAF_DEL_LAST_FORCE : << 
1574                            TC_HTB_LEAF_DEL_LAST, << 
1575                 .classid = cl->common.classid, << 
1576                 .extack = extack,             << 
1577         };                                    << 
1578         err = htb_offload(qdisc_dev(sch), &offload_opt); << 
1579                                               << 
1580         if (!destroying) {                    << 
1581                 if (!err)                     << 
1582                         qdisc_put(old);       << 
1583                 else                          << 
1584                         htb_graft_helper(dev_queue, old); << 
1585         }                                     << 
1586                                               << 
1587         if (last_child)                       << 
1588                 return err;                   << 
1589                                               << 
1590         if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) { << 
1591                 u32 classid = TC_H_MAJ(sch->handle) | << 
1592                               TC_H_MIN(offload_opt.classid); << 
1593                 struct htb_class *moved_cl = htb_find(classid, sch); << 
1594                                               << 
1595                 htb_offload_move_qdisc(sch, moved_cl, cl, destroying); << 
1596         }                                     << 
1597                                               << 
1598         return err;                           << 
1599 }                                                1190 }
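
The nested ternary that picks the offload command above reads as a small decision table; restated as a sketch helper (the enum values are the real ones from include/net/pkt_cls.h, the helper itself is hypothetical):

	static enum tc_htb_command leaf_del_command(bool last_child, bool destroying)
	{
		if (!last_child)
			return TC_HTB_LEAF_DEL;		     /* parent keeps other leaves */
		return destroying ? TC_HTB_LEAF_DEL_LAST_FORCE /* full qdisc teardown   */
				  : TC_HTB_LEAF_DEL_LAST;      /* parent reverts to leaf */
	}
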
1600                                                  1191 
1601 static void htb_destroy_class(struct Qdisc *s    1192 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1602 {                                                1193 {
1603         if (!cl->level) {                        1194         if (!cl->level) {
1604                 WARN_ON(!cl->leaf.q);         !! 1195                 WARN_ON(!cl->un.leaf.q);
1605                 qdisc_put(cl->leaf.q);        !! 1196                 qdisc_destroy(cl->un.leaf.q);
1606         }                                        1197         }
1607         gen_kill_estimator(&cl->rate_est);    !! 1198         gen_kill_estimator(&cl->bstats, &cl->rate_est);
1608         tcf_block_put(cl->block);             !! 1199         qdisc_put_rtab(cl->rate);
                                                   >> 1200         qdisc_put_rtab(cl->ceil);
                                                   >> 1201 
                                                   >> 1202         tcf_destroy_chain(&cl->filter_list);
1609         kfree(cl);                               1203         kfree(cl);
1610 }                                                1204 }
1611                                                  1205 
1612 static void htb_destroy(struct Qdisc *sch)       1206 static void htb_destroy(struct Qdisc *sch)
1613 {                                                1207 {
1614         struct net_device *dev = qdisc_dev(sch); << 
1615         struct tc_htb_qopt_offload offload_opt; << 
1616         struct htb_sched *q = qdisc_priv(sch)    1208         struct htb_sched *q = qdisc_priv(sch);
1617         struct hlist_node *next;              !! 1209         struct hlist_node *n, *next;
1618         bool nonempty, changed;               << 
1619         struct htb_class *cl;                    1210         struct htb_class *cl;
1620         unsigned int i;                          1211         unsigned int i;
1621                                                  1212 
1622         cancel_work_sync(&q->work);              1213         cancel_work_sync(&q->work);
1623         qdisc_watchdog_cancel(&q->watchdog);     1214         qdisc_watchdog_cancel(&q->watchdog);
1624         /* This line used to be after htb_des    1215         /* This line used to be after htb_destroy_class call below
1625          * and surprisingly it worked in 2.4. But it must precede it !! 1216            and surprisingly it worked in 2.4. But it must precede it
1626          * because filters need their target class alive to be able to call !! 1217            because filter need its target class alive to be able to call
1627          * unbind_filter on it (without Oops). !! 1218            unbind_filter on it (without Oops). */
1628          */                                   !! 1219         tcf_destroy_chain(&q->filter_list);
1629         tcf_block_put(q->block);              << 
1630                                                  1220 
1631         for (i = 0; i < q->clhash.hashsize; i    1221         for (i = 0; i < q->clhash.hashsize; i++) {
1632                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { !! 1222                 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1633                         tcf_block_put(cl->block); !! 1223                         tcf_destroy_chain(&cl->filter_list);
1634                         cl->block = NULL;     << 
1635                 }                             << 
1636         }                                        1224         }
1637                                               !! 1225         for (i = 0; i < q->clhash.hashsize; i++) {
1638         do {                                  !! 1226                 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1639                 nonempty = false;             !! 1227                                           common.hnode)
1640                 changed = false;              !! 1228                         htb_destroy_class(sch, cl);
1641                 for (i = 0; i < q->clhash.hashsize; i++) { << 
1642                         hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], << 
1643                                                   common.hnode) { << 
1644                                 bool last_child; << 
1645                                               << 
1646                                 if (!q->offload) { << 
1647                                         htb_destroy_class(sch, cl); << 
1648                                         continue; << 
1649                                 }             << 
1650                                               << 
1651                                 nonempty = true; << 
1652                                               << 
1653                                 if (cl->level) << 
1654                                         continue; << 
1655                                               << 
1656                                 changed = true; << 
1657                                               << 
1658                                 last_child = htb_parent_last_child(cl); << 
1659                                 htb_destroy_class_offload(sch, cl, last_child, << 
1660                                                           true, NULL); << 
1661                                 qdisc_class_hash_remove(&q->clhash, << 
1662                                                         &cl->common); << 
1663                                 if (cl->parent) << 
1664                                         cl->parent->children--; << 
1665                                 if (last_child) << 
1666                                         htb_parent_to_leaf(sch, cl, NULL); << 
1667                                 htb_destroy_class(sch, cl); << 
1668                         }                     << 
1669                 }                             << 
1670         } while (changed);                    << 
1671         WARN_ON(nonempty);                    << 
1672                                               << 
1673         qdisc_class_hash_destroy(&q->clhash); << 
1674         __qdisc_reset_queue(&q->direct_queue); << 
1675                                               << 
1676         if (q->offload) {                     << 
1677                 offload_opt = (struct tc_htb_qopt_offload) { << 
1678                         .command = TC_HTB_DESTROY, << 
1679                 };                            << 
1680                 htb_offload(dev, &offload_opt); << 
1681         }                                        1229         }
1682                                               !! 1230         qdisc_class_hash_destroy(&q->clhash);
1683         if (!q->direct_qdiscs)                !! 1231         __skb_queue_purge(&q->direct_queue);
1684                 return;                       << 
1685         for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) << 
1686                 qdisc_put(q->direct_qdiscs[i]); << 
1687         kfree(q->direct_qdiscs);              << 
1688 }                                                1232 }
1689                                                  1233 
1690 static int htb_delete(struct Qdisc *sch, unsigned long arg, !! 1234 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1691                       struct netlink_ext_ack *extack) << 
1692 {                                                1235 {
1693         struct htb_sched *q = qdisc_priv(sch)    1236         struct htb_sched *q = qdisc_priv(sch);
1694         struct htb_class *cl = (struct htb_cl    1237         struct htb_class *cl = (struct htb_class *)arg;
                                                   >> 1238         unsigned int qlen;
1695         struct Qdisc *new_q = NULL;              1239         struct Qdisc *new_q = NULL;
1696         int last_child = 0;                      1240         int last_child = 0;
1697         int err;                              << 
1698                                                  1241 
1699         /* TODO: why don't allow to delete subtree ? references ? does !! 1242         // TODO: why don't allow to delete subtree ? references ? does
1700          * tc subsys guarantee us that in htb_destroy it holds no class !! 1243         // tc subsys quarantee us that in htb_destroy it holds no class
1701          * refs so that we can remove children safely there ? !! 1244         // refs so that we can remove children safely there ?
1702          */                                   !! 1245         if (cl->children || cl->filter_cnt)
1703         if (cl->children || qdisc_class_in_use(&cl->common)) { << 
1704                 NL_SET_ERR_MSG(extack, "HTB class in use"); << 
1705                 return -EBUSY;                   1246                 return -EBUSY;
1706         }                                     << 
1707                                                  1247 
1708         if (!cl->level && htb_parent_last_child(cl)) !! 1248         if (!cl->level && htb_parent_last_child(cl)) {
                                                   >> 1249                 new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                                   >> 1250                                           &pfifo_qdisc_ops,
                                                   >> 1251                                           cl->parent->common.classid);
1709                 last_child = 1;                  1252                 last_child = 1;
1710                                               << 
1711         if (q->offload) {                     << 
1712                 err = htb_destroy_class_offload(sch, cl, last_child, false, << 
1713                                                 extack); << 
1714                 if (err)                      << 
1715                         return err;           << 
1716         }                                     << 
1717                                               << 
1718         if (last_child) {                     << 
1719                 struct netdev_queue *dev_queue = sch->dev_queue; << 
1720                                               << 
1721                 if (q->offload)               << 
1722                         dev_queue = htb_offload_get_queue(cl); << 
1723                                               << 
1724                 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, << 
1725                                           cl->parent->common.classid, << 
1726                                           NULL); << 
1727                 if (q->offload)               << 
1728                         htb_parent_to_leaf_offload(sch, dev_queue, new_q); << 
1729         }                                        1253         }
1730                                                  1254 
1731         sch_tree_lock(sch);                      1255         sch_tree_lock(sch);
1732                                                  1256 
1733         if (!cl->level)                       !! 1257         if (!cl->level) {
1734                 qdisc_purge_queue(cl->leaf.q); !! 1258                 qlen = cl->un.leaf.q->q.qlen;
                                                   >> 1259                 qdisc_reset(cl->un.leaf.q);
                                                   >> 1260                 qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
                                                   >> 1261         }
1735                                                  1262 
1736         /* delete from hash and active; remai    1263         /* delete from hash and active; remainder in destroy_class */
1737         qdisc_class_hash_remove(&q->clhash, &    1264         qdisc_class_hash_remove(&q->clhash, &cl->common);
1738         if (cl->parent)                          1265         if (cl->parent)
1739                 cl->parent->children--;          1266                 cl->parent->children--;
1740                                                  1267 
1741         if (cl->prio_activity)                   1268         if (cl->prio_activity)
1742                 htb_deactivate(q, cl);           1269                 htb_deactivate(q, cl);
1743                                                  1270 
1744         if (cl->cmode != HTB_CAN_SEND)           1271         if (cl->cmode != HTB_CAN_SEND)
1745                 htb_safe_rb_erase(&cl->pq_node, !! 1272                 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1746                                   &q->hlevel[cl->level].wait_pq); << 
1747                                                  1273 
1748         if (last_child)                          1274         if (last_child)
1749                 htb_parent_to_leaf(sch, cl, new_q); !! 1275                 htb_parent_to_leaf(q, cl, new_q);
1750                                                  1276 
1751         sch_tree_unlock(sch);                 !! 1277         BUG_ON(--cl->refcnt == 0);
                                                   >> 1278         /*
                                                   >> 1279          * This shouldn't happen: we "hold" one cops->get() when called
                                                   >> 1280          * from tc_ctl_tclass; the destroy method is done from cops->put().
                                                   >> 1281          */
1752                                                  1282 
1753         htb_destroy_class(sch, cl);           !! 1283         sch_tree_unlock(sch);
1754         return 0;                                1284         return 0;
1755 }                                                1285 }
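
Before unlinking the class, htb_delete purges its leaf qdisc under the tree lock. A sketch of what qdisc_purge_queue() amounts to (assumption: simplified from the inline helper in include/net/sch_generic.h): capture the queue counters, reset the child, then propagate the decrease so ancestors' qlen/backlog stay consistent:

	static void purge_queue_sketch(struct Qdisc *sch)
	{
		unsigned int qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_reset(sch);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}
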
1756                                                  1286 
                                                   >> 1287 static void htb_put(struct Qdisc *sch, unsigned long arg)
                                                   >> 1288 {
                                                   >> 1289         struct htb_class *cl = (struct htb_class *)arg;
                                                   >> 1290 
                                                   >> 1291         if (--cl->refcnt == 0)
                                                   >> 1292                 htb_destroy_class(sch, cl);
                                                   >> 1293 }
                                                   >> 1294 
1757 static int htb_change_class(struct Qdisc *sch    1295 static int htb_change_class(struct Qdisc *sch, u32 classid,
1758                             u32 parentid, str    1296                             u32 parentid, struct nlattr **tca,
1759                             unsigned long *arg, struct netlink_ext_ack *extack) !! 1297                             unsigned long *arg)
1760 {                                                1298 {
1761         int err = -EINVAL;                       1299         int err = -EINVAL;
1762         struct htb_sched *q = qdisc_priv(sch)    1300         struct htb_sched *q = qdisc_priv(sch);
1763         struct htb_class *cl = (struct htb_cl    1301         struct htb_class *cl = (struct htb_class *)*arg, *parent;
1764         struct tc_htb_qopt_offload offload_opt; << 
1765         struct nlattr *opt = tca[TCA_OPTIONS]    1302         struct nlattr *opt = tca[TCA_OPTIONS];
1766         struct nlattr *tb[TCA_HTB_MAX + 1];   !! 1303         struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1767         struct Qdisc *parent_qdisc = NULL;    !! 1304         struct nlattr *tb[TCA_HTB_RTAB + 1];
1768         struct netdev_queue *dev_queue;       << 
1769         struct tc_htb_opt *hopt;                 1305         struct tc_htb_opt *hopt;
1770         u64 rate64, ceil64;                   << 
1771         int warn = 0;                         << 
1772                                                  1306 
1773         /* extract all subattrs from opt attr    1307         /* extract all subattrs from opt attr */
1774         if (!opt)                                1308         if (!opt)
1775                 goto failure;                    1309                 goto failure;
1776                                                  1310 
1777         err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy, !! 1311         err = nla_parse_nested(tb, TCA_HTB_RTAB, opt, htb_policy);
1778                                           extack); << 
1779         if (err < 0)                             1312         if (err < 0)
1780                 goto failure;                    1313                 goto failure;
1781                                                  1314 
1782         err = -EINVAL;                           1315         err = -EINVAL;
1783         if (tb[TCA_HTB_PARMS] == NULL)           1316         if (tb[TCA_HTB_PARMS] == NULL)
1784                 goto failure;                    1317                 goto failure;
1785                                                  1318 
1786         parent = parentid == TC_H_ROOT ? NULL    1319         parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1787                                                  1320 
1788         hopt = nla_data(tb[TCA_HTB_PARMS]);      1321         hopt = nla_data(tb[TCA_HTB_PARMS]);
1789         if (!hopt->rate.rate || !hopt->ceil.rate) << 
1790                 goto failure;                 << 
1791                                               << 
1792         if (q->offload) {                     << 
1793                 /* Options not supported by the offload. */ << 
1794                 if (hopt->rate.overhead || hopt->ceil.overhead) { << 
1795                         NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter"); << 
1796                         goto failure;         << 
1797                 }                             << 
1798                 if (hopt->rate.mpu || hopt->ceil.mpu) { << 
1799                         NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter"); << 
1800                         goto failure;         << 
1801                 }                             << 
1802         }                                     << 
1803                                               << 
1804         /* Keeping backward compatible with rate_table based iproute2 tc */ << 
1805         if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) << 
1806                 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB], << 
1807                                               NULL)); << 
1808                                               << 
1809         if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) << 
1810                 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB], << 
1811                                               NULL)); << 
1812                                                  1322 
1813         rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; !! 1323         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1814         ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; !! 1324         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
                                                   >> 1325         if (!rtab || !ctab)
                                                   >> 1326                 goto failure;
1815                                                  1327 
1816         if (!cl) {              /* new class     1328         if (!cl) {              /* new class */
1817                 struct net_device *dev = qdisc_dev(sch); !! 1329                 struct Qdisc *new_q;
1818                 struct Qdisc *new_q, *old_q;  << 
1819                 int prio;                        1330                 int prio;
1820                 struct {                         1331                 struct {
1821                         struct nlattr            1332                         struct nlattr           nla;
1822                         struct gnet_estimator    1333                         struct gnet_estimator   opt;
1823                 } est = {                        1334                 } est = {
1824                         .nla = {                 1335                         .nla = {
1825                                 .nla_len         1336                                 .nla_len        = nla_attr_size(sizeof(est.opt)),
1826                                 .nla_type        1337                                 .nla_type       = TCA_RATE,
1827                         },                       1338                         },
1828                         .opt = {                 1339                         .opt = {
1829                                 /* 4s interva    1340                                 /* 4s interval, 16s averaging constant */
1830                                 .interval        1341                                 .interval       = 2,
1831                                 .ewma_log        1342                                 .ewma_log       = 2,
1832                         },                       1343                         },
1833                 };                               1344                 };
1834                                                  1345 
1835                 /* check for valid classid */    1346                 /* check for valid classid */
1836                 if (!classid || TC_H_MAJ(classid ^ sch->handle) || !! 1347                 if (!classid || TC_H_MAJ(classid ^ sch->handle)
1837                     htb_find(classid, sch))   !! 1348                     || htb_find(classid, sch))
1838                         goto failure;            1349                         goto failure;
1839                                                  1350 
1840                 /* check maximal depth */        1351                 /* check maximal depth */
1841                 if (parent && parent->parent     1352                 if (parent && parent->parent && parent->parent->level < 2) {
1842                         NL_SET_ERR_MSG_MOD(extack, "tree is too deep"); !! 1353                         printk(KERN_ERR "htb: tree is too deep\n");
1843                         goto failure;            1354                         goto failure;
1844                 }                                1355                 }
1845                 err = -ENOBUFS;                  1356                 err = -ENOBUFS;
1846                 cl = kzalloc(sizeof(*cl), GFP_KERNEL); !! 1357                 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1847                 if (!cl)                      << 
1848                         goto failure;            1358                         goto failure;
1849                                                  1359 
1850                 gnet_stats_basic_sync_init(&cl->bstats); !! 1360                 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
1851                 gnet_stats_basic_sync_init(&cl->bstats_bias); !! 1361                                         qdisc_root_sleeping_lock(sch),
1852                                               !! 1362                                         tca[TCA_RATE] ? : &est.nla);
1853                 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); << 
1854                 if (err) {                       1363                 if (err) {
1855                         kfree(cl);               1364                         kfree(cl);
1856                         goto failure;            1365                         goto failure;
1857                 }                                1366                 }
1858                 if (htb_rate_est || tca[TCA_RATE]) { << 
1859                         err = gen_new_estimator(&cl->bstats, NULL, << 
1860                                                 &cl->rate_est, << 
1861                                                 NULL, << 
1862                                                 true, << 
1863                                                 tca[TCA_RATE] ? : &est.nla); << 
1864                         if (err)              << 
1865                                 goto err_block_put; << 
1866                 }                             << 
1867                                                  1367 
                                                   >> 1368                 cl->refcnt = 1;
1868                 cl->children = 0;                1369                 cl->children = 0;
                                                   >> 1370                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1869                 RB_CLEAR_NODE(&cl->pq_node);     1371                 RB_CLEAR_NODE(&cl->pq_node);
1870                                                  1372 
1871                 for (prio = 0; prio < TC_HTB_    1373                 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1872                         RB_CLEAR_NODE(&cl->no    1374                         RB_CLEAR_NODE(&cl->node[prio]);
1873                                                  1375 
1874                 cl->common.classid = classid; << 
1875                                               << 
1876                 /* Make sure nothing interrupts us in between of two << 
1877                  * ndo_setup_tc calls.        << 
1878                  */                           << 
1879                 ASSERT_RTNL();                << 
1880                                               << 
1881                 /* create leaf qdisc early be    1376                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1882                  * so that can't be used inside of sch_tree_lock !! 1377                    so that can't be used inside of sch_tree_lock
1883                  * -- thanks to Karlis Peisenieks !! 1378                    -- thanks to Karlis Peisenieks */
1884                  */                           !! 1379                 new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1885                 if (!q->offload) {            << 
1886                         dev_queue = sch->dev_queue; << 
1887                 } else if (!(parent && !parent->level)) { << 
1888                         /* Assign a dev_queue to this classid. */ << 
1889                         offload_opt = (struct tc_htb_qopt_offload) { << 
1890                                 .command = TC_HTB_LEAF_ALLOC_QUEUE, << 
1891                                 .classid = cl->common.classid, << 
1892                                 .parent_classid = parent ? << 
1893                                         TC_H_MIN(parent->common.classid) : << 
1894                                         TC_HTB_CLASSID_ROOT, << 
1895                                 .rate = max_t(u64, hopt->rate.rate, rate64), << 
1896                                 .ceil = max_t(u64, hopt->ceil.rate, ceil64), << 
1897                                 .prio = hopt->prio, << 
1898                                 .quantum = hopt->quantum, << 
1899                                 .extack = extack, << 
1900                         };                    << 
1901                         err = htb_offload(dev, &offload_opt); << 
1902                         if (err) {            << 
1903                                 NL_SET_ERR_MSG_WEAK(extack, << 
1904                                                     "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE"); << 
1905                                 goto err_kill_estimator; << 
1906                         }                     << 
1907                         dev_queue = netdev_get_tx_queue(dev, offload_opt.qid); << 
1908                 } else { /* First child. */   << 
1909                         dev_queue = htb_offload_get_queue(parent); << 
1910                         old_q = htb_graft_helper(dev_queue, NULL); << 
1911                         WARN_ON(old_q != parent->leaf.q); << 
1912                         offload_opt = (struct tc_htb_qopt_offload) { << 
1913                                 .command = TC_HTB_LEAF_TO_INNER, << 
1914                                 .classid = cl->common.classid, << 
1915                                 .parent_classid = << 
1916                                         TC_H_MIN(parent->common.classid), << 
1917                                 .rate = max_t(u64, hopt->rate.rate, rate64), << 
1918                                 .ceil = max_t(u64, hopt->ceil.rate, ceil64), << 
1919                                 .prio = hopt->prio, << 
1920                                 .quantum = hopt->quantum, << 
1921                                 .extack = extack, << 
1922                         };                    << 
1923                         err = htb_offload(dev, &offload_opt); << 
1924                         if (err) {            << 
1925                                 NL_SET_ERR_MSG_WEAK(extack, << 
1926                                                     "Failed to offload TC_HTB_LEAF_TO_INNER"); << 
1927                                 htb_graft_helper(dev_queue, old_q); << 
1928                                 goto err_kill_estimator; << 
1929                         }                     << 
1930                         _bstats_update(&parent->bstats_bias, << 
1931                                        u64_stats_read(&old_q->bstats.bytes), << 
1932                                        u64_stats_read(&old_q->bstats.packets)); << 
1933                         qdisc_put(old_q);     << 
1934                 }                             << 
1935                 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, << 
1936                                           classid, NULL); << 
1937                 if (q->offload) {             << 
1938                         /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ << 
1939                         if (new_q)            << 
1940                                 qdisc_refcount_inc(new_q); << 
1941                         old_q = htb_graft_helper(dev_queue, new_q); << 
1942                         /* No qdisc_put needed. */ << 
1943                         WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); << 
1944                 }                             << 
1945                 sch_tree_lock(sch);              1381                 sch_tree_lock(sch);
1946                 if (parent && !parent->level)    1382                 if (parent && !parent->level) {
                                                   >> 1383                         unsigned int qlen = parent->un.leaf.q->q.qlen;
                                                   >> 1384 
1947                         /* turn parent into i    1385                         /* turn parent into inner node */
1948                         qdisc_purge_queue(parent->leaf.q);   !! 1386                         qdisc_reset(parent->un.leaf.q);
1949                         parent_qdisc = parent->leaf.q;   !! 1387                         qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
                                                   >> 1388                         qdisc_destroy(parent->un.leaf.q);
1950                         if (parent->prio_acti    1389                         if (parent->prio_activity)
1951                                 htb_deactivat    1390                                 htb_deactivate(q, parent);
1952                                                  1391 
1953                         /* remove from evt li    1392                         /* remove from evt list because of level change */
1954                         if (parent->cmode !=     1393                         if (parent->cmode != HTB_CAN_SEND) {
1955                                 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);   !! 1394                                 htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
1956                                 parent->cmode    1395                                 parent->cmode = HTB_CAN_SEND;
1957                         }                        1396                         }
1958                         parent->level = (pare    1397                         parent->level = (parent->parent ? parent->parent->level
1959                                          : TC    1398                                          : TC_HTB_MAXDEPTH) - 1;
1960                         memset(&parent->inner, 0, sizeof(parent->inner));   !! 1399                         memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1961                 }                                1400                 }
1962                                               << 
1963                 /* leaf (we) needs elementary    1401                 /* leaf (we) needs elementary qdisc */
1964                 cl->leaf.q = new_q ? new_q : &noop_qdisc;   !! 1402                 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1965                 if (q->offload)               << 
1966                         cl->leaf.offload_queue = dev_queue;   <<
1967                                                  1403 
                                                   >> 1404                 cl->common.classid = classid;
1968                 cl->parent = parent;             1405                 cl->parent = parent;
1969                                                  1406 
1970                 /* set class to be in HTB_CAN    1407                 /* set class to be in HTB_CAN_SEND state */
1971                 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);   !! 1408                 cl->tokens = hopt->buffer;
1972                 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);   !! 1409                 cl->ctokens = hopt->cbuffer;
1973                 cl->mbuffer = 60ULL * NSEC_PER_SEC;     /* 1min */   !! 1410                 cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
1974                 cl->t_c = ktime_get_ns();     !! 1411                 cl->t_c = psched_get_time();
1975                 cl->cmode = HTB_CAN_SEND;        1412                 cl->cmode = HTB_CAN_SEND;
1976                                                  1413 
1977                 /* attach to the hash list an    1414                 /* attach to the hash list and parent's family */
1978                 qdisc_class_hash_insert(&q->c    1415                 qdisc_class_hash_insert(&q->clhash, &cl->common);
1979                 if (parent)                      1416                 if (parent)
1980                         parent->children++;      1417                         parent->children++;
1981                 if (cl->leaf.q != &noop_qdisc)   <<
1982                         qdisc_hash_add(cl->leaf.q, true);   <<
1983         } else {                                 1418         } else {
1984                 if (tca[TCA_RATE]) {             1419                 if (tca[TCA_RATE]) {
1985                         err = gen_replace_estimator(&cl->bstats, NULL,   !! 1420                         err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
1986                                                     &cl->rate_est,   !! 1421                                                     qdisc_root_sleeping_lock(sch),
1987                                                     NULL,   <<
1988                                                     true,   <<
1989                                                     tca[TCA_RATE]);      1422                                                     tca[TCA_RATE]);
1990                         if (err)                 1423                         if (err)
1991                                 return err;      1424                                 return err;
1992                 }                                1425                 }
1993                                               << 
1994                 if (q->offload) {   <<
1995                         struct net_device *dev = qdisc_dev(sch);   <<
1996                                                       <<
1997                         offload_opt = (struct tc_htb_qopt_offload) {   <<
1998                                 .command = TC_HTB_NODE_MODIFY,   <<
1999                                 .classid = cl->common.classid,   <<
2000                                 .rate = max_t(u64, hopt->rate.rate, rate64),   <<
2001                                 .ceil = max_t(u64, hopt->ceil.rate, ceil64),   <<
2002                                 .prio = hopt->prio,   <<
2003                                 .quantum = hopt->quantum,   <<
2004                                 .extack = extack,   <<
2005                         };   <<
2006                         err = htb_offload(dev, &offload_opt);   <<
2007                         if (err)   <<
2008                                 /* Estimator was replaced, and rollback may fail   <<
2009                                  * as well, so we don't try to recover it, and   <<
2010                                  * the estimator won't work properly with the   <<
2011                                  * offload anyway, because bstats are updated   <<
2012                                  * only when the stats are queried.   <<
2013                                  */   <<
2014                                 return err;   <<
2015                 }   <<
2016                                               << 
2017                 sch_tree_lock(sch);              1426                 sch_tree_lock(sch);
2018         }                                        1427         }
2019                                                  1428 
2020         psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);   <<
2021         psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);   <<
2022                                               << 
2023         /* it used to be a nasty bug here, we    1429         /* it used to be a nasty bug here, we have to check that node
2024          * is really leaf before changing cl->leaf !   !! 1430            is really leaf before changing cl->un.leaf ! */
2025          */                                   << 
2026         if (!cl->level) {                        1431         if (!cl->level) {
2027                 u64 quantum = cl->rate.rate_bytes_ps;   !! 1432                 cl->quantum = rtab->rate.rate / q->rate2quantum;
2028                                               << 
2029                 do_div(quantum, q->rate2quantum);   <<
2030                 cl->quantum = min_t(u64, quantum, INT_MAX);   <<
2031                                               << 
2032                 if (!hopt->quantum && cl->qua    1433                 if (!hopt->quantum && cl->quantum < 1000) {
2033                         warn = -1;            !! 1434                         printk(KERN_WARNING
                                                   >> 1435                                "HTB: quantum of class %X is small. Consider r2q change.\n",
                                                   >> 1436                                cl->common.classid);
2034                         cl->quantum = 1000;      1437                         cl->quantum = 1000;
2035                 }                                1438                 }
2036                 if (!hopt->quantum && cl->qua    1439                 if (!hopt->quantum && cl->quantum > 200000) {
2037                         warn = 1;             !! 1440                         printk(KERN_WARNING
                                                   >> 1441                                "HTB: quantum of class %X is big. Consider r2q change.\n",
                                                   >> 1442                                cl->common.classid);
2038                         cl->quantum = 200000;    1443                         cl->quantum = 200000;
2039                 }                                1444                 }
2040                 if (hopt->quantum)               1445                 if (hopt->quantum)
2041                         cl->quantum = hopt->q    1446                         cl->quantum = hopt->quantum;
2042                 if ((cl->prio = hopt->prio) >    1447                 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2043                         cl->prio = TC_HTB_NUM    1448                         cl->prio = TC_HTB_NUMPRIO - 1;
2044         }                                        1449         }
2045                                                  1450 
2046         cl->buffer = PSCHED_TICKS2NS(hopt->buffer);   !! 1451         cl->buffer = hopt->buffer;
2047         cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);   !! 1452         cl->cbuffer = hopt->cbuffer;
2048                                               !! 1453         if (cl->rate)
                                                   >> 1454                 qdisc_put_rtab(cl->rate);
                                                   >> 1455         cl->rate = rtab;
                                                   >> 1456         if (cl->ceil)
                                                   >> 1457                 qdisc_put_rtab(cl->ceil);
                                                   >> 1458         cl->ceil = ctab;
2049         sch_tree_unlock(sch);                    1459         sch_tree_unlock(sch);
2050         qdisc_put(parent_qdisc);              << 
2051                                               << 
2052         if (warn)                             << 
2053                 NL_SET_ERR_MSG_FMT_MOD(extack,   <<
2054                                        "quantum of class %X is %s. Consider r2q change.",   <<
2055                                        cl->common.classid, (warn == -1 ? "small" : "big"));   <<
2056                                                  1460 
2057         qdisc_class_hash_grow(sch, &q->clhash    1461         qdisc_class_hash_grow(sch, &q->clhash);
2058                                                  1462 
2059         *arg = (unsigned long)cl;                1463         *arg = (unsigned long)cl;
2060         return 0;                                1464         return 0;
2061                                                  1465 
2062 err_kill_estimator:                           << 
2063         gen_kill_estimator(&cl->rate_est);    << 
2064 err_block_put:                                << 
2065         tcf_block_put(cl->block);             << 
2066         kfree(cl);                            << 
2067 failure:                                         1466 failure:
                                                   >> 1467         if (rtab)
                                                   >> 1468                 qdisc_put_rtab(rtab);
                                                   >> 1469         if (ctab)
                                                   >> 1470                 qdisc_put_rtab(ctab);
2068         return err;                              1471         return err;
2069 }                                                1472 }
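The quantum logic at the end of htb_change_class() is easy to misread in diff form, so here is a minimal stand-alone C sketch of the same derivation; the helper name compute_quantum() and its parameters are hypothetical illustrations, not kernel symbols, and r2q is assumed to be at least 1 as the qdisc's defaults ensure.

/*
 * Illustrative sketch only (not kernel code): a leaf's DRR quantum
 * defaults to rate_bytes_ps / r2q and is clamped to [1000, 200000]
 * unless the user supplied an explicit quantum.
 */
#include <stdint.h>

static uint32_t compute_quantum(uint64_t rate_bytes_ps, uint32_t r2q,
                                uint32_t user_quantum)
{
        uint64_t quantum;

        if (user_quantum)                /* explicit tc quantum wins */
                return user_quantum;

        quantum = rate_bytes_ps / r2q;   /* kernel does this with do_div() */
        if (quantum < 1000)              /* "is small" warning path */
                quantum = 1000;
        if (quantum > 200000)            /* "is big" warning path */
                quantum = 200000;
        return (uint32_t)quantum;
}

Roughly speaking, the clamp bounds match the extack warnings above: a quantum under 1000 bytes forces too many scheduling rounds per packet, while one over 200000 bytes makes service between sibling leaves too coarse, which is why the message suggests changing r2q instead.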
2070                                                  1473 
2071 static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,   !! 1474 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
2072                                        struct netlink_ext_ack *extack)   <<
2073 {                                                1475 {
2074         struct htb_sched *q = qdisc_priv(sch)    1476         struct htb_sched *q = qdisc_priv(sch);
2075         struct htb_class *cl = (struct htb_cl    1477         struct htb_class *cl = (struct htb_class *)arg;
                                                   >> 1478         struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
2076                                                  1479 
2077         return cl ? cl->block : q->block;     !! 1480         return fl;
2078 }                                                1481 }
2079                                                  1482 
2080 static unsigned long htb_bind_filter(struct Q    1483 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
2081                                      u32 clas    1484                                      u32 classid)
2082 {                                                1485 {
2083         struct htb_class *cl = htb_find(class    1486         struct htb_class *cl = htb_find(classid, sch);
2084                                                  1487 
2085         /*if (cl && !cl->level) return 0;        1488         /*if (cl && !cl->level) return 0;
2086          * The line above used to be there to !! 1489            The line above used to be there to prevent attaching filters to
2087          * leaves. But at least tc_index filt !! 1490            leaves. But at least tc_index filter uses this just to get class
2088          * for other reasons so that we have  !! 1491            for other reasons so that we have to allow for it.
2089          * ----                               !! 1492            ----
2090          * 19.6.2002 As Werner explained it i !! 1493            19.6.2002 As Werner explained it is ok - bind filter is just
2091          * another way to "lock" the class -  !! 1494            another way to "lock" the class - unlike "get" this lock can
2092          * be broken by class during destroy  !! 1495            be broken by class during destroy IIUC.
2093          */                                      1496          */
2094         if (cl)                                  1497         if (cl)
2095                 qdisc_class_get(&cl->common); !! 1498                 cl->filter_cnt++;
2096         return (unsigned long)cl;                1499         return (unsigned long)cl;
2097 }                                                1500 }
2098                                                  1501 
2099 static void htb_unbind_filter(struct Qdisc *s    1502 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2100 {                                                1503 {
2101         struct htb_class *cl = (struct htb_cl    1504         struct htb_class *cl = (struct htb_class *)arg;
2102                                                  1505 
2103         qdisc_class_put(&cl->common);         !! 1506         if (cl)
                                                   >> 1507                 cl->filter_cnt--;
2104 }                                                1508 }
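The bind/unbind pair above shows the new version taking a real reference on the class in place of the old bare filter_cnt integer, so a class with filters still bound cannot be reclaimed underneath them. Below is a minimal generic sketch of that pattern; all names here (my_class, class_get, class_put, class_free) are hypothetical, with C11 atomics standing in for the kernel's own refcount helpers.

/*
 * Illustrative sketch only: bind takes a reference, unbind drops it,
 * and the object is freed when the last reference goes away.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct my_class {
        atomic_int refcnt;      /* one reference held by the qdisc itself */
};

static void class_free(struct my_class *cl)
{
        free(cl);               /* stand-in for the real class teardown */
}

static void class_get(struct my_class *cl)      /* bind_tcf side */
{
        atomic_fetch_add(&cl->refcnt, 1);
}

static void class_put(struct my_class *cl)      /* unbind_tcf side */
{
        /* fetch_sub returns the old value; 1 means the last ref dropped */
        if (atomic_fetch_sub(&cl->refcnt, 1) == 1)
                class_free(cl);
}

As the 19.6.2002 comment notes, binding a filter is just another way to "lock" the class; expressing that as a reference makes the lifetime rule explicit instead of relying on a counter checked only at delete time.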
2105                                                  1509 
2106 static void htb_walk(struct Qdisc *sch, struc    1510 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2107 {                                                1511 {
2108         struct htb_sched *q = qdisc_priv(sch)    1512         struct htb_sched *q = qdisc_priv(sch);
2109         struct htb_class *cl;                    1513         struct htb_class *cl;
                                                   >> 1514         struct hlist_node *n;
2110         unsigned int i;                          1515         unsigned int i;
2111                                                  1516 
2112         if (arg->stop)                           1517         if (arg->stop)
2113                 return;                          1518                 return;
2114                                                  1519 
2115         for (i = 0; i < q->clhash.hashsize; i    1520         for (i = 0; i < q->clhash.hashsize; i++) {
2116                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {   !! 1521                 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
2117                         if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))   !! 1522                         if (arg->count < arg->skip) {
                                                   >> 1523                                 arg->count++;
                                                   >> 1524                                 continue;
                                                   >> 1525                         }
                                                   >> 1526                         if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                                   >> 1527                                 arg->stop = 1;
2118                                 return;          1528                                 return;
                                                   >> 1529                         }
                                                   >> 1530                         arg->count++;
2119                 }                                1531                 }
2120         }                                        1532         }
2121 }                                                1533 }
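In the new version, htb_walk() delegates the skip/count/stop bookkeeping to the tc_qdisc_stats_dump() helper, whereas the old code on the right open-codes it per class. A stand-alone sketch of that walker protocol follows; the names (struct walker, walk_one, visit_fn) are hypothetical and only illustrate the bookkeeping, not the kernel's actual types.

/*
 * Illustrative sketch only: skip entries up to the resume point, invoke
 * the callback on each remaining entry, and stop when it reports < 0.
 */
#include <stddef.h>

struct walker {
        size_t skip;            /* entries to pass over (resume point) */
        size_t count;           /* entries visited so far */
        int stop;               /* set once the callback aborts the walk */
        int (*visit_fn)(void *item, struct walker *w);
};

/* Returns nonzero to continue the walk, zero to stop it. */
static int walk_one(void *item, struct walker *w)
{
        if (w->stop)
                return 0;
        if (w->count < w->skip) {       /* still before the resume point */
                w->count++;
                return 1;
        }
        if (w->visit_fn(item, w) < 0) { /* callback asked us to stop */
                w->stop = 1;
                return 0;
        }
        w->count++;
        return 1;
}

A callback returning a negative value halts the walk with count recording the position reached, so a later invocation can resume by setting skip to that value.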
2122                                                  1534 
2123 static const struct Qdisc_class_ops htb_class    1535 static const struct Qdisc_class_ops htb_class_ops = {
2124         .select_queue   =       htb_select_queue,   <<
2125         .graft          =       htb_graft,       1536         .graft          =       htb_graft,
2126         .leaf           =       htb_leaf,        1537         .leaf           =       htb_leaf,
2127         .qlen_notify    =       htb_qlen_noti    1538         .qlen_notify    =       htb_qlen_notify,
2128         .find           =       htb_search,   !! 1539         .get            =       htb_get,
                                                   >> 1540         .put            =       htb_put,
2129         .change         =       htb_change_cl    1541         .change         =       htb_change_class,
2130         .delete         =       htb_delete,      1542         .delete         =       htb_delete,
2131         .walk           =       htb_walk,        1543         .walk           =       htb_walk,
2132         .tcf_block      =       htb_tcf_block !! 1544         .tcf_chain      =       htb_find_tcf,
2133         .bind_tcf       =       htb_bind_filt    1545         .bind_tcf       =       htb_bind_filter,
2134         .unbind_tcf     =       htb_unbind_fi    1546         .unbind_tcf     =       htb_unbind_filter,
2135         .dump           =       htb_dump_clas    1547         .dump           =       htb_dump_class,
2136         .dump_stats     =       htb_dump_clas    1548         .dump_stats     =       htb_dump_class_stats,
2137 };                                               1549 };
2138                                                  1550 
2139 static struct Qdisc_ops htb_qdisc_ops __read_    1551 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
                                                   >> 1552         .next           =       NULL,
2140         .cl_ops         =       &htb_class_op    1553         .cl_ops         =       &htb_class_ops,
2141         .id             =       "htb",           1554         .id             =       "htb",
2142         .priv_size      =       sizeof(struct    1555         .priv_size      =       sizeof(struct htb_sched),
2143         .enqueue        =       htb_enqueue,     1556         .enqueue        =       htb_enqueue,
2144         .dequeue        =       htb_dequeue,     1557         .dequeue        =       htb_dequeue,
2145         .peek           =       qdisc_peek_de    1558         .peek           =       qdisc_peek_dequeued,
                                                   >> 1559         .drop           =       htb_drop,
2146         .init           =       htb_init,        1560         .init           =       htb_init,
2147         .attach         =       htb_attach,   << 
2148         .reset          =       htb_reset,       1561         .reset          =       htb_reset,
2149         .destroy        =       htb_destroy,     1562         .destroy        =       htb_destroy,
                                                   >> 1563         .change         =       NULL /* htb_change */,
2150         .dump           =       htb_dump,        1564         .dump           =       htb_dump,
2151         .owner          =       THIS_MODULE,     1565         .owner          =       THIS_MODULE,
2152 };                                               1566 };
2153 MODULE_ALIAS_NET_SCH("htb");                  << 
2154                                                  1567 
2155 static int __init htb_module_init(void)          1568 static int __init htb_module_init(void)
2156 {                                                1569 {
2157         return register_qdisc(&htb_qdisc_ops)    1570         return register_qdisc(&htb_qdisc_ops);
2158 }                                                1571 }
2159 static void __exit htb_module_exit(void)         1572 static void __exit htb_module_exit(void)
2160 {                                                1573 {
2161         unregister_qdisc(&htb_qdisc_ops);        1574         unregister_qdisc(&htb_qdisc_ops);
2162 }                                                1575 }
2163                                                  1576 
2164 module_init(htb_module_init)                     1577 module_init(htb_module_init)
2165 module_exit(htb_module_exit)                     1578 module_exit(htb_module_exit)
2166 MODULE_LICENSE("GPL");                           1579 MODULE_LICENSE("GPL");
2167 MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");   <<
2168                                                  1580 
