
TOMOYO Linux Cross Reference
Linux/net/sched/sch_htb.c

Diff markup

Differences between /net/sched/sch_htb.c (Version linux-6.11.5) and /net/sched/sch_htb.c (Version linux-2.4.37.11)


  1 // SPDX-License-Identifier: GPL-2.0-or-later   !!   1 /* vim: ts=8 sw=8
  2 /*                                             << 
  3  * net/sched/sch_htb.c  Hierarchical token buc      2  * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
  4  *                                                  3  *
                                                   >>   4  *              This program is free software; you can redistribute it and/or
                                                   >>   5  *              modify it under the terms of the GNU General Public License
                                                   >>   6  *              as published by the Free Software Foundation; either version
                                                   >>   7  *              2 of the License, or (at your option) any later version.
                                                   >>   8  *
  5  * Authors:     Martin Devera, <devik@cdi.cz>       9  * Authors:     Martin Devera, <devik@cdi.cz>
  6  *                                                 10  *
  7  * Credits (in time order) for older HTB versi     11  * Credits (in time order) for older HTB versions:
  8  *              Stef Coene <stef.coene@docum.o     12  *              Stef Coene <stef.coene@docum.org>
  9  *                      HTB support at LARTC m     13  *                      HTB support at LARTC mailing list
 10  *              Ondrej Kraus, <krauso@barr.cz> !!  14  *              Ondrej Kraus, <krauso@barr.cz> 
 11  *                      found missing INIT_QDI     15  *                      found missing INIT_QDISC(htb)
 12  *              Vladimir Smelhaus, Aamer Akhte     16  *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 13  *                      helped a lot to locate     17  *                      helped a lot to locate nasty class stall bug
 14  *              Andi Kleen, Jamal Hadi, Bert H     18  *              Andi Kleen, Jamal Hadi, Bert Hubert
 15  *                      code review and helpfu     19  *                      code review and helpful comments on shaping
 16  *              Tomasz Wrona, <tw@eter.tym.pl>     20  *              Tomasz Wrona, <tw@eter.tym.pl>
 17  *                      created test case so t     21  *                      created test case so that I was able to fix nasty bug
 18  *              Wilfried Weissmann                 22  *              Wilfried Weissmann
 19  *                      spotted bug in dequeue     23  *                      spotted bug in dequeue code and helped with fix
 20  *              Jiri Fojtasek                      24  *              Jiri Fojtasek
 21  *                      fixed requeue routine      25  *                      fixed requeue routine
 22  *              and many others. thanks.           26  *              and many others. thanks.
                                                   >>  27  *
                                                   >>  28  * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 23  */                                                29  */
                                                   >>  30 #include <linux/config.h>
 24 #include <linux/module.h>                          31 #include <linux/module.h>
 25 #include <linux/moduleparam.h>                 !!  32 #include <asm/uaccess.h>
                                                   >>  33 #include <asm/system.h>
                                                   >>  34 #include <asm/bitops.h>
 26 #include <linux/types.h>                           35 #include <linux/types.h>
 27 #include <linux/kernel.h>                          36 #include <linux/kernel.h>
                                                   >>  37 #include <linux/version.h>
                                                   >>  38 #include <linux/sched.h>
 28 #include <linux/string.h>                          39 #include <linux/string.h>
                                                   >>  40 #include <linux/mm.h>
                                                   >>  41 #include <linux/socket.h>
                                                   >>  42 #include <linux/sockios.h>
                                                   >>  43 #include <linux/in.h>
 29 #include <linux/errno.h>                           44 #include <linux/errno.h>
                                                   >>  45 #include <linux/interrupt.h>
                                                   >>  46 #include <linux/if_ether.h>
                                                   >>  47 #include <linux/inet.h>
                                                   >>  48 #include <linux/netdevice.h>
                                                   >>  49 #include <linux/etherdevice.h>
                                                   >>  50 #include <linux/notifier.h>
                                                   >>  51 #include <net/ip.h>
                                                   >>  52 #include <net/route.h>
 30 #include <linux/skbuff.h>                          53 #include <linux/skbuff.h>
 31 #include <linux/list.h>                            54 #include <linux/list.h>
 32 #include <linux/compiler.h>                        55 #include <linux/compiler.h>
 33 #include <linux/rbtree.h>                      !!  56 #include <net/sock.h>
 34 #include <linux/workqueue.h>                   << 
 35 #include <linux/slab.h>                        << 
 36 #include <net/netlink.h>                       << 
 37 #include <net/sch_generic.h>                   << 
 38 #include <net/pkt_sched.h>                         57 #include <net/pkt_sched.h>
 39 #include <net/pkt_cls.h>                       !!  58 #include <linux/rbtree.h>
 40                                                    59 
 41 /* HTB algorithm.                                  60 /* HTB algorithm.
 42     Author: devik@cdi.cz                           61     Author: devik@cdi.cz
 43     ==========================================     62     ========================================================================
 44     HTB is like TBF with multiple classes. It      63     HTB is like TBF with multiple classes. It is also similar to CBQ because
 45     it allows to assign priority to each class !!  64     it allows to assign priority to each class in hierarchy. 
 46     In fact it is another implementation of Fl     65     In fact it is another implementation of Floyd's formal sharing.
 47                                                    66 
 48     Levels:                                        67     Levels:
 49     Each class is assigned level. Leaf has ALW !!  68     Each class is assigned level. Leaf has ALWAYS level 0 and root 
 50     classes have level TC_HTB_MAXDEPTH-1. Inte     69     classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level
 51     one less than their parent.                    70     one less than their parent.
 52 */                                                 71 */
 53                                                    72 
 54 static int htb_hysteresis __read_mostly = 0; / !!  73 #define HTB_HSIZE 16    /* classid hash size */
 55 #define HTB_VER 0x30011         /* major must  !!  74 #define HTB_EWMAC 2     /* rate average over HTB_EWMAC*HTB_HSIZE sec */
                                                   >>  75 #define HTB_DEBUG 1     /* compile debugging support (activated by tc tool) */
                                                   >>  76 #define HTB_RATECM 1    /* whether to use rate computer */
                                                   >>  77 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
                                                   >>  78 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
                                                   >>  79 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
                                                   >>  80 #define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
 56                                                    81 
 57 #if HTB_VER >> 16 != TC_HTB_PROTOVER               82 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 58 #error "Mismatched sch_htb.c and pkt_sch.h"        83 #error "Mismatched sch_htb.c and pkt_sch.h"
 59 #endif                                             84 #endif
 60                                                    85 
 61 /* Module parameter and sysfs export */        !!  86 /* debugging support; S is subsystem, these are defined:
 62 module_param    (htb_hysteresis, int, 0640);   !!  87   0 - netlink messages
 63 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis m !!  88   1 - enqueue
 64                                                !!  89   2 - drop & requeue
 65 static int htb_rate_est = 0; /* htb classes ha !!  90   3 - dequeue main
 66 module_param(htb_rate_est, int, 0640);         !!  91   4 - dequeue one prio DRR part
 67 MODULE_PARM_DESC(htb_rate_est, "setup a defaul !!  92   5 - dequeue class accounting
                                                   >>  93   6 - class overlimit status computation
                                                   >>  94   7 - hint tree
                                                   >>  95   8 - event queue
                                                   >>  96  10 - rate estimator
                                                   >>  97  11 - classifier 
                                                   >>  98  12 - fast dequeue cache
                                                   >>  99 
                                                   >> 100  L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
                                                   >> 101  q->debug uint32 contains 16 2-bit fields one for subsystem starting
                                                   >> 102  from LSB
                                                   >> 103  */
                                                   >> 104 #ifdef HTB_DEBUG
                                                   >> 105 #define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
                                                   >> 106 #define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
                                                   >> 107         printk(KERN_DEBUG FMT,##ARG)
                                                   >> 108 #define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
                                                   >> 109 #define HTB_PASSQ q,
                                                   >> 110 #define HTB_ARGQ struct htb_sched *q,
                                                   >> 111 #define static
                                                   >> 112 #undef __inline__
                                                   >> 113 #define __inline__
                                                   >> 114 #undef inline
                                                   >> 115 #define inline
                                                   >> 116 #define HTB_CMAGIC 0xFEFAFEF1
                                                   >> 117 #define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
                                                   >> 118                 if ((N)->rb_color == -1) break; \
                                                   >> 119                 rb_erase(N,R); \
                                                   >> 120                 (N)->rb_color = -1; } while (0)
                                                   >> 121 #else
                                                   >> 122 #define HTB_DBG_COND(S,L) (0)
                                                   >> 123 #define HTB_DBG(S,L,FMT,ARG...)
                                                   >> 124 #define HTB_PASSQ
                                                   >> 125 #define HTB_ARGQ
                                                   >> 126 #define HTB_CHCL(cl)
                                                   >> 127 #define htb_safe_rb_erase(N,R) rb_erase(N,R)
                                                   >> 128 #endif
                                                   >> 129 
 68                                                   130 
 69 /* used internaly to keep status of single cla    131 /* used internaly to keep status of single class */
 70 enum htb_cmode {                                  132 enum htb_cmode {
 71         HTB_CANT_SEND,          /* class can't !! 133     HTB_CANT_SEND,              /* class can't send and can't borrow */
 72         HTB_MAY_BORROW,         /* class can't !! 134     HTB_MAY_BORROW,             /* class can't send but may borrow */
 73         HTB_CAN_SEND            /* class can s !! 135     HTB_CAN_SEND                /* class can send */
 74 };                                                136 };
 75                                                   137 
 76 struct htb_prio {                              !! 138 /* interior & leaf nodes; props specific to leaves are marked L: */
 77         union {                                !! 139 struct htb_class
 78                 struct rb_root  row;           !! 140 {
 79                 struct rb_root  feed;          !! 141 #ifdef HTB_DEBUG
 80         };                                     !! 142         unsigned magic;
 81         struct rb_node  *ptr;                  !! 143 #endif
 82         /* When class changes from state 1->2  !! 144     /* general class parameters */
 83          * parent's feed then we lost ptr valu !! 145     u32 classid;
 84          * first child again. Here we store cl !! 146     struct tc_stats     stats;  /* generic stats */
 85          * last valid ptr (used when ptr is NU !! 147     struct tc_htb_xstats xstats;/* our special stats */
 86          */                                    !! 148     int refcnt;                 /* usage count of this class */
 87         u32             last_ptr_id;           !! 149 
 88 };                                             !! 150 #ifdef HTB_RATECM
 89                                                !! 151     /* rate measurement counters */
 90 /* interior & leaf nodes; props specific to le !! 152     unsigned long rate_bytes,sum_bytes;
 91  * To reduce false sharing, place mostly read  !! 153     unsigned long rate_packets,sum_packets;
 92  * and mostly written ones at the end.         !! 154 #endif
 93  */                                            << 
 94 struct htb_class {                             << 
 95         struct Qdisc_class_common common;      << 
 96         struct psched_ratecfg   rate;          << 
 97         struct psched_ratecfg   ceil;          << 
 98         s64                     buffer, cbuffe << 
 99         s64                     mbuffer;       << 
100         u32                     prio;          << 
101         int                     quantum;       << 
102                                                << 
103         struct tcf_proto __rcu  *filter_list;  << 
104         struct tcf_block        *block;        << 
105                                                << 
106         int                     level;         << 
107         unsigned int            children;      << 
108         struct htb_class        *parent;       << 
109                                                << 
110         struct net_rate_estimator __rcu *rate_ << 
111                                                << 
112         /*                                     << 
113          * Written often fields                << 
114          */                                    << 
115         struct gnet_stats_basic_sync bstats;   << 
116         struct gnet_stats_basic_sync bstats_bi << 
117         struct tc_htb_xstats    xstats; /* our << 
118                                                << 
119         /* token bucket parameters */          << 
120         s64                     tokens, ctoken << 
121         s64                     t_c;           << 
122                                                << 
123         union {                                << 
124                 struct htb_class_leaf {        << 
125                         int             defici << 
126                         struct Qdisc    *q;    << 
127                         struct netdev_queue *o << 
128                 } leaf;                        << 
129                 struct htb_class_inner {       << 
130                         struct htb_prio clprio << 
131                 } inner;                       << 
132         };                                     << 
133         s64                     pq_key;        << 
134                                                << 
135         int                     prio_activity; << 
136         enum htb_cmode          cmode;         << 
137         struct rb_node          pq_node;       << 
138         struct rb_node          node[TC_HTB_NU << 
139                                                << 
140         unsigned int drops ____cacheline_align << 
141         unsigned int            overlimits;    << 
142 };                                             << 
143                                                   155 
144 struct htb_level {                             !! 156     /* topology */
145         struct rb_root  wait_pq;               !! 157     int level;                  /* our level (see above) */
146         struct htb_prio hprio[TC_HTB_NUMPRIO]; !! 158     struct htb_class *parent;   /* parent class */
                                                   >> 159     struct list_head hlist;     /* classid hash list item */
                                                   >> 160     struct list_head sibling;   /* sibling list item */
                                                   >> 161     struct list_head children;  /* children list */
                                                   >> 162 
                                                   >> 163     union {
                                                   >> 164             struct htb_class_leaf {
                                                   >> 165                     struct Qdisc *q;
                                                   >> 166                     int prio;
                                                   >> 167                     int aprio;  
                                                   >> 168                     int quantum;
                                                   >> 169                     int deficit[TC_HTB_MAXDEPTH];
                                                   >> 170                     struct list_head drop_list;
                                                   >> 171             } leaf;
                                                   >> 172             struct htb_class_inner {
                                                   >> 173                     rb_root_t feed[TC_HTB_NUMPRIO];     /* feed trees */
                                                   >> 174                     rb_node_t *ptr[TC_HTB_NUMPRIO];     /* current class ptr */
                                                   >> 175                     /* When class changes from state 1->2 and disconnects from 
                                                   >> 176                        parent's feed then we lost ptr value and start from the
                                                   >> 177                        first child again. Here we store classid of the
                                                   >> 178                        last valid ptr (used when ptr is NULL). */
                                                   >> 179                     u32 last_ptr_id[TC_HTB_NUMPRIO];
                                                   >> 180             } inner;
                                                   >> 181     } un;
                                                   >> 182     rb_node_t node[TC_HTB_NUMPRIO];     /* node for self or feed tree */
                                                   >> 183     rb_node_t pq_node;                  /* node for event queue */
                                                   >> 184     unsigned long pq_key;       /* the same type as jiffies global */
                                                   >> 185     
                                                   >> 186     int prio_activity;          /* for which prios are we active */
                                                   >> 187     enum htb_cmode cmode;       /* current mode of the class */
                                                   >> 188 
                                                   >> 189     /* class attached filters */
                                                   >> 190     struct tcf_proto *filter_list;
                                                   >> 191     int filter_cnt;
                                                   >> 192 
                                                   >> 193     int warned;         /* only one warning about non work conserving .. */
                                                   >> 194 
                                                   >> 195     /* token bucket parameters */
                                                   >> 196     struct qdisc_rate_table *rate;      /* rate table of the class itself */
                                                   >> 197     struct qdisc_rate_table *ceil;      /* ceiling rate (limits borrows too) */
                                                   >> 198     long buffer,cbuffer;                /* token bucket depth/rate */
                                                   >> 199     long mbuffer;                       /* max wait time */
                                                   >> 200     long tokens,ctokens;                /* current number of tokens */
                                                   >> 201     psched_time_t t_c;                  /* checkpoint time */
147 };                                                202 };
148                                                   203 
149 struct htb_sched {                             !! 204 /* TODO: maybe compute rate when size is too large .. or drop ? */
150         struct Qdisc_class_hash clhash;        !! 205 static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
151         int                     defcls;        !! 206         int size)
152         int                     rate2quantum;  !! 207 { 
153                                                !! 208     int slot = size >> rate->rate.cell_log;
154         /* filters for qdisc itself */         !! 209     if (slot > 255) {
155         struct tcf_proto __rcu  *filter_list;  !! 210         cl->xstats.giants++;
156         struct tcf_block        *block;        !! 211         slot = 255;
157                                                !! 212     }
158 #define HTB_WARN_TOOMANYEVENTS  0x1            !! 213     return rate->data[slot];
159         unsigned int            warned; /* onl !! 214 }
160         int                     direct_qlen;   !! 215 
161         struct work_struct      work;          !! 216 struct htb_sched
162                                                !! 217 {
163         /* non shaped skbs; let them go direct !! 218     struct list_head root;                      /* root classes list */
164         struct qdisc_skb_head   direct_queue;  !! 219     struct list_head hash[HTB_HSIZE];           /* hashed by classid */
165         u32                     direct_pkts;   !! 220     struct list_head drops[TC_HTB_NUMPRIO];     /* active leaves (for drops) */
166         u32                     overlimits;    !! 221     
167                                                !! 222     /* self list - roots of self generating tree */
168         struct qdisc_watchdog   watchdog;      !! 223     rb_root_t row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
169                                                !! 224     int row_mask[TC_HTB_MAXDEPTH];
170         s64                     now;    /* cac !! 225     rb_node_t *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
171                                                !! 226     u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
172         /* time of nearest event per level (ro !! 227 
173         s64                     near_ev_cache[ !! 228     /* self wait list - roots of wait PQs per row */
174                                                !! 229     rb_root_t wait_pq[TC_HTB_MAXDEPTH];
175         int                     row_mask[TC_HT !! 230 
176                                                !! 231     /* time of nearest event per level (row) */
177         struct htb_level        hlevel[TC_HTB_ !! 232     unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
178                                                !! 233 
179         struct Qdisc            **direct_qdisc !! 234     /* cached value of jiffies in dequeue */
180         unsigned int            num_direct_qdi !! 235     unsigned long jiffies;
                                                   >> 236 
                                                   >> 237     /* whether we hit non-work conserving class during this dequeue; we use */
                                                   >> 238     int nwc_hit;        /* this to disable mindelay complaint in dequeue */
                                                   >> 239 
                                                   >> 240     int defcls;         /* class where unclassified flows go to */
                                                   >> 241     u32 debug;          /* subsystem debug levels */
                                                   >> 242 
                                                   >> 243     /* filters for qdisc itself */
                                                   >> 244     struct tcf_proto *filter_list;
                                                   >> 245     int filter_cnt;
                                                   >> 246 
                                                   >> 247     int rate2quantum;           /* quant = rate / rate2quantum */
                                                   >> 248     psched_time_t now;          /* cached dequeue time */
                                                   >> 249     struct timer_list timer;    /* send delay timer */
                                                   >> 250 #ifdef HTB_RATECM
                                                   >> 251     struct timer_list rttim;    /* rate computer timer */
                                                   >> 252     int recmp_bucket;           /* which hash bucket to recompute next */
                                                   >> 253 #endif
                                                   >> 254     
                                                   >> 255     /* non shaped skbs; let them go directly thru */
                                                   >> 256     struct sk_buff_head direct_queue;
                                                   >> 257     int direct_qlen;  /* max qlen of above */
181                                                   258 
182         bool                    offload;       !! 259     long direct_pkts;
183 };                                                260 };
184                                                   261 
185 /* find class in global hash table using given !! 262 /* compute hash of size HTB_HSIZE for given handle */
186 static inline struct htb_class *htb_find(u32 h !! 263 static __inline__ int htb_hash(u32 h) 
187 {                                                 264 {
188         struct htb_sched *q = qdisc_priv(sch); !! 265 #if HTB_HSIZE != 16
189         struct Qdisc_class_common *clc;        !! 266  #error "Declare new hash for your HTB_HSIZE"
190                                                !! 267 #endif
191         clc = qdisc_class_find(&q->clhash, han !! 268     h ^= h>>8;  /* stolen from cbq_hash */
192         if (clc == NULL)                       !! 269     h ^= h>>4;
193                 return NULL;                   !! 270     return h & 0xf;
194         return container_of(clc, struct htb_cl << 
195 }                                                 271 }
196                                                   272 
197 static unsigned long htb_search(struct Qdisc * !! 273 /* find class in global hash table using given handle */
                                                   >> 274 static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
198 {                                                 275 {
199         return (unsigned long)htb_find(handle, !! 276         struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 277         struct list_head *p;
                                                   >> 278         if (TC_H_MAJ(handle) != sch->handle) 
                                                   >> 279                 return NULL;
                                                   >> 280         
                                                   >> 281         list_for_each (p,q->hash+htb_hash(handle)) {
                                                   >> 282                 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                                                   >> 283                 if (cl->classid == handle)
                                                   >> 284                         return cl;
                                                   >> 285         }
                                                   >> 286         return NULL;
200 }                                                 287 }
201                                                   288 
202 #define HTB_DIRECT ((struct htb_class *)-1L)   << 
203                                                << 
204 /**                                               289 /**
205  * htb_classify - classify a packet into class    290  * htb_classify - classify a packet into class
206  * @skb: the socket buffer                     << 
207  * @sch: the active queue discipline           << 
208  * @qerr: pointer for returned status code     << 
209  *                                                291  *
210  * It returns NULL if the packet should be dro    292  * It returns NULL if the packet should be dropped or -1 if the packet
211  * should be passed directly thru. In all othe    293  * should be passed directly thru. In all other cases leaf class is returned.
212  * We allow direct class selection by classid     294  * We allow direct class selection by classid in priority. The we examine
213  * filters in qdisc and in inner nodes (if hig    295  * filters in qdisc and in inner nodes (if higher filter points to the inner
214  * node). If we end up with classid MAJOR:0 we    296  * node). If we end up with classid MAJOR:0 we enqueue the skb into special
215  * internal fifo (direct). These packets then  !! 297  * internal fifo (direct). These packets then go directly thru. If we still 
216  * have no valid leaf we try to use MAJOR:defa !! 298  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
217  * then finish and return direct queue.           299  * then finish and return direct queue.
218  */                                               300  */
219 static struct htb_class *htb_classify(struct s !! 301 #define HTB_DIRECT (struct htb_class*)-1
220                                       int *qer !! 302 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
221 {                                                 303 {
222         struct htb_sched *q = qdisc_priv(sch); !! 304         struct htb_sched *q = (struct htb_sched *)sch->data;
223         struct htb_class *cl;                     305         struct htb_class *cl;
224         struct tcf_result res;                    306         struct tcf_result res;
225         struct tcf_proto *tcf;                    307         struct tcf_proto *tcf;
226         int result;                               308         int result;
227                                                   309 
228         /* allow to select class by setting sk    310         /* allow to select class by setting skb->priority to valid classid;
229          * note that nfmark can be used too by !! 311            note that nfmark can be used too by attaching filter fw with no
230          * rules in it                         !! 312            rules in it */
231          */                                    << 
232         if (skb->priority == sch->handle)         313         if (skb->priority == sch->handle)
233                 return HTB_DIRECT;      /* X:0 !! 314                 return HTB_DIRECT;  /* X:0 (direct flow) selected */
234         cl = htb_find(skb->priority, sch);     !! 315         if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 
235         if (cl) {                              !! 316                 return cl;
236                 if (cl->level == 0)            !! 317 
237                         return cl;             !! 318         tcf = q->filter_list;
238                 /* Start with inner filter cha !! 319         while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
239                 tcf = rcu_dereference_bh(cl->f !! 320 #ifdef CONFIG_NET_CLS_POLICE
240         } else {                               !! 321                 if (result == TC_POLICE_SHOT)
241                 tcf = rcu_dereference_bh(q->fi << 
242         }                                      << 
243                                                << 
244         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_ << 
245         while (tcf && (result = tcf_classify(s << 
246 #ifdef CONFIG_NET_CLS_ACT                      << 
247                 switch (result) {              << 
248                 case TC_ACT_QUEUED:            << 
249                 case TC_ACT_STOLEN:            << 
250                 case TC_ACT_TRAP:              << 
251                         *qerr = NET_XMIT_SUCCE << 
252                         fallthrough;           << 
253                 case TC_ACT_SHOT:              << 
254                         return NULL;              322                         return NULL;
255                 }                              << 
256 #endif                                            323 #endif
257                 cl = (void *)res.class;        !! 324                 if ((cl = (void*)res.class) == NULL) {
258                 if (!cl) {                     << 
259                         if (res.classid == sch    325                         if (res.classid == sch->handle)
260                                 return HTB_DIR !! 326                                 return HTB_DIRECT;  /* X:0 (direct flow) */
261                         cl = htb_find(res.clas !! 327                         if ((cl = htb_find(res.classid,sch)) == NULL)
262                         if (!cl)               !! 328                                 break; /* filter selected invalid classid */
263                                 break;  /* fil << 
264                 }                                 329                 }
265                 if (!cl->level)                   330                 if (!cl->level)
266                         return cl;      /* we  !! 331                         return cl; /* we hit leaf; return it */
267                                                   332 
268                 /* we have got inner class; ap    333                 /* we have got inner class; apply inner filter chain */
269                 tcf = rcu_dereference_bh(cl->f !! 334                 tcf = cl->filter_list;
270         }                                         335         }
271         /* classification failed; try to use d    336         /* classification failed; try to use default class */
272         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch-> !! 337         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
273         if (!cl || cl->level)                     338         if (!cl || cl->level)
274                 return HTB_DIRECT;      /* bad !! 339                 return HTB_DIRECT; /* bad default .. this is safe bet */
275         return cl;                                340         return cl;
276 }                                                 341 }
277                                                   342 
                                                   >> 343 #ifdef HTB_DEBUG
                                                   >> 344 static void htb_next_rb_node(rb_node_t **n);
                                                   >> 345 #define HTB_DUMTREE(root,memb) if(root) { \
                                                   >> 346         rb_node_t *n = (root)->rb_node; \
                                                   >> 347         while (n->rb_left) n = n->rb_left; \
                                                   >> 348         while (n) { \
                                                   >> 349                 struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
                                                   >> 350                 printk(" %x",cl->classid); htb_next_rb_node (&n); \
                                                   >> 351         } }
                                                   >> 352 
                                                   >> 353 static void htb_debug_dump (struct htb_sched *q)
                                                   >> 354 {
                                                   >> 355         int i,p;
                                                   >> 356         printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
                                                   >> 357         /* rows */
                                                   >> 358         for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
                                                   >> 359                 printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
                                                   >> 360                 for (p=0;p<TC_HTB_NUMPRIO;p++) {
                                                   >> 361                         if (!q->row[i][p].rb_node) continue;
                                                   >> 362                         printk(" p%d:",p);
                                                   >> 363                         HTB_DUMTREE(q->row[i]+p,node[p]);
                                                   >> 364                 }
                                                   >> 365                 printk("\n");
                                                   >> 366         }
                                                   >> 367         /* classes */
                                                   >> 368         for (i = 0; i < HTB_HSIZE; i++) {
                                                   >> 369                 struct list_head *l;
                                                   >> 370                 list_for_each (l,q->hash+i) {
                                                   >> 371                         struct htb_class *cl = list_entry(l,struct htb_class,hlist);
                                                   >> 372                         long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
                                                   >> 373                         printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
                                                   >> 374                                         "pa=%x f:",
                                                   >> 375                                 cl->classid,cl->cmode,cl->tokens,cl->ctokens,
                                                   >> 376                                 cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
                                                   >> 377                                 cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
                                                   >> 378                         if (cl->level)
                                                   >> 379                         for (p=0;p<TC_HTB_NUMPRIO;p++) {
                                                   >> 380                                 if (!cl->un.inner.feed[p].rb_node) continue;
                                                   >> 381                                 printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
                                                   >> 382                                 HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
                                                   >> 383                         }
                                                   >> 384                         printk("\n");
                                                   >> 385                 }
                                                   >> 386         }
                                                   >> 387 }
                                                   >> 388 #endif
278 /**                                               389 /**
279  * htb_add_to_id_tree - adds class to the roun    390  * htb_add_to_id_tree - adds class to the round robin list
280  * @root: the root of the tree                 << 
281  * @cl: the class to add                       << 
282  * @prio: the give prio in class               << 
283  *                                                391  *
284  * Routine adds class to the list (actually tr    392  * Routine adds class to the list (actually tree) sorted by classid.
285  * Make sure that class is not already on such    393  * Make sure that class is not already on such list for given prio.
286  */                                               394  */
287 static void htb_add_to_id_tree(struct rb_root  !! 395 static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
288                                struct htb_clas !! 396                 struct htb_class *cl,int prio)
289 {                                                 397 {
290         struct rb_node **p = &root->rb_node, * !! 398         rb_node_t **p = &root->rb_node, *parent = NULL;
291                                                !! 399         HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
                                                   >> 400 #ifdef HTB_DEBUG
                                                   >> 401         if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
                                                   >> 402         HTB_CHCL(cl);
                                                   >> 403         if (*p) {
                                                   >> 404                 struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
                                                   >> 405                 HTB_CHCL(x);
                                                   >> 406         }
                                                   >> 407 #endif
292         while (*p) {                              408         while (*p) {
293                 struct htb_class *c;           !! 409                 struct htb_class *c; parent = *p;
294                 parent = *p;                   << 
295                 c = rb_entry(parent, struct ht    410                 c = rb_entry(parent, struct htb_class, node[prio]);
296                                                !! 411                 HTB_CHCL(c);
297                 if (cl->common.classid > c->co !! 412                 if (cl->classid > c->classid)
298                         p = &parent->rb_right;    413                         p = &parent->rb_right;
299                 else                           !! 414                 else 
300                         p = &parent->rb_left;     415                         p = &parent->rb_left;
301         }                                         416         }
302         rb_link_node(&cl->node[prio], parent,     417         rb_link_node(&cl->node[prio], parent, p);
303         rb_insert_color(&cl->node[prio], root)    418         rb_insert_color(&cl->node[prio], root);
304 }                                                 419 }
305                                                   420 
306 /**                                               421 /**
307  * htb_add_to_wait_tree - adds class to the ev    422  * htb_add_to_wait_tree - adds class to the event queue with delay
308  * @q: the priority event queue                << 
309  * @cl: the class to add                       << 
310  * @delay: delay in microseconds               << 
311  *                                                423  *
312  * The class is added to priority event queue     424  * The class is added to priority event queue to indicate that class will
313  * change its mode in cl->pq_key microseconds.    425  * change its mode in cl->pq_key microseconds. Make sure that class is not
314  * already in the queue.                          426  * already in the queue.
315  */                                               427  */
316 static void htb_add_to_wait_tree(struct htb_sc !! 428 static void htb_add_to_wait_tree (struct htb_sched *q,
317                                  struct htb_cl !! 429                 struct htb_class *cl,long delay,int debug_hint)
318 {                                                 430 {
319         struct rb_node **p = &q->hlevel[cl->le !! 431         rb_node_t **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
320                                                !! 432         HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
321         cl->pq_key = q->now + delay;           !! 433 #ifdef HTB_DEBUG
322         if (cl->pq_key == q->now)              !! 434         if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
                                                   >> 435         HTB_CHCL(cl);
                                                   >> 436         if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
                                                   >> 437                 printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
                                                   >> 438 #endif
                                                   >> 439         cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
                                                   >> 440         if (cl->pq_key == q->jiffies)
323                 cl->pq_key++;                     441                 cl->pq_key++;
324                                                   442 
325         /* update the nearest event cache */      443         /* update the nearest event cache */
326         if (q->near_ev_cache[cl->level] > cl-> !! 444         if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
327                 q->near_ev_cache[cl->level] =     445                 q->near_ev_cache[cl->level] = cl->pq_key;
328                                                !! 446         
329         while (*p) {                              447         while (*p) {
330                 struct htb_class *c;           !! 448                 struct htb_class *c; parent = *p;
331                 parent = *p;                   << 
332                 c = rb_entry(parent, struct ht    449                 c = rb_entry(parent, struct htb_class, pq_node);
333                 if (cl->pq_key >= c->pq_key)   !! 450                 if (time_after_eq(cl->pq_key, c->pq_key))
334                         p = &parent->rb_right;    451                         p = &parent->rb_right;
335                 else                           !! 452                 else 
336                         p = &parent->rb_left;     453                         p = &parent->rb_left;
337         }                                         454         }
338         rb_link_node(&cl->pq_node, parent, p);    455         rb_link_node(&cl->pq_node, parent, p);
339         rb_insert_color(&cl->pq_node, &q->hlev !! 456         rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
340 }                                                 457 }
341                                                   458 
342 /**                                               459 /**
343  * htb_next_rb_node - finds next node in binar    460  * htb_next_rb_node - finds next node in binary tree
344  * @n: the current node in binary tree         << 
345  *                                                461  *
346  * When we are past last key we return NULL.      462  * When we are past last key we return NULL.
347  * Average complexity is 2 steps per call.        463  * Average complexity is 2 steps per call.
348  */                                               464  */
349 static inline void htb_next_rb_node(struct rb_ !! 465 static void htb_next_rb_node(rb_node_t **n)
350 {                                                 466 {
351         *n = rb_next(*n);                      !! 467         rb_node_t *p;
                                                   >> 468         if ((*n)->rb_right) {
                                                   >> 469                 /* child at right. use it or its leftmost ancestor */
                                                   >> 470                 *n = (*n)->rb_right;
                                                   >> 471                 while ((*n)->rb_left) 
                                                   >> 472                         *n = (*n)->rb_left;
                                                   >> 473                 return;
                                                   >> 474         }
                                                   >> 475         while ((p = (*n)->rb_parent) != NULL) {
                                                   >> 476                 /* if we've arrived from left child then we have next node */
                                                   >> 477                 if (p->rb_left == *n) break;
                                                   >> 478                 *n = p;
                                                   >> 479         }
                                                   >> 480         *n = p;
352 }                                                 481 }
353                                                   482 
354 /**                                               483 /**
355  * htb_add_class_to_row - add class to its row    484  * htb_add_class_to_row - add class to its row
356  * @q: the priority event queue                << 
357  * @cl: the class to add                       << 
358  * @mask: the given priorities in class in bit << 
359  *                                                485  *
360  * The class is added to row at priorities mar    486  * The class is added to row at priorities marked in mask.
361  * It does nothing if mask == 0.                  487  * It does nothing if mask == 0.
362  */                                               488  */
363 static inline void htb_add_class_to_row(struct !! 489 static inline void htb_add_class_to_row(struct htb_sched *q, 
364                                         struct !! 490                 struct htb_class *cl,int mask)
365 {                                                 491 {
                                                   >> 492         HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
                                                   >> 493                         cl->classid,mask,q->row_mask[cl->level]);
                                                   >> 494         HTB_CHCL(cl);
366         q->row_mask[cl->level] |= mask;           495         q->row_mask[cl->level] |= mask;
367         while (mask) {                            496         while (mask) {
368                 int prio = ffz(~mask);            497                 int prio = ffz(~mask);
369                 mask &= ~(1 << prio);             498                 mask &= ~(1 << prio);
370                 htb_add_to_id_tree(&q->hlevel[ !! 499                 htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
371         }                                         500         }
372 }                                                 501 }
373                                                   502 
374 /* If this triggers, it is a bug in this code, << 
375 static void htb_safe_rb_erase(struct rb_node * << 
376 {                                              << 
377         if (RB_EMPTY_NODE(rb)) {               << 
378                 WARN_ON(1);                    << 
379         } else {                               << 
380                 rb_erase(rb, root);            << 
381                 RB_CLEAR_NODE(rb);             << 
382         }                                      << 
383 }                                              << 
384                                                << 
385                                                << 
386 /**                                               503 /**
387  * htb_remove_class_from_row - removes class f    504  * htb_remove_class_from_row - removes class from its row
388  * @q: the priority event queue                << 
389  * @cl: the class to add                       << 
390  * @mask: the given priorities in class in bit << 
391  *                                                505  *
392  * The class is removed from row at priorities    506  * The class is removed from row at priorities marked in mask.
393  * It does nothing if mask == 0.                  507  * It does nothing if mask == 0.
394  */                                               508  */
395 static inline void htb_remove_class_from_row(s !! 509 static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
396                                                !! 510                 struct htb_class *cl,int mask)
397 {                                                 511 {
398         int m = 0;                                512         int m = 0;
399         struct htb_level *hlevel = &q->hlevel[ !! 513         HTB_CHCL(cl);
400                                                << 
401         while (mask) {                            514         while (mask) {
402                 int prio = ffz(~mask);            515                 int prio = ffz(~mask);
403                 struct htb_prio *hprio = &hlev << 
404                                                << 
405                 mask &= ~(1 << prio);             516                 mask &= ~(1 << prio);
406                 if (hprio->ptr == cl->node + p !! 517                 if (q->ptr[cl->level][prio] == cl->node+prio)
407                         htb_next_rb_node(&hpri !! 518                         htb_next_rb_node(q->ptr[cl->level]+prio);
408                                                !! 519                 htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
409                 htb_safe_rb_erase(cl->node + p !! 520                 if (!q->row[cl->level][prio].rb_node) 
410                 if (!hprio->row.rb_node)       << 
411                         m |= 1 << prio;           521                         m |= 1 << prio;
412         }                                         522         }
                                                   >> 523         HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
                                                   >> 524                         cl->classid,mask,q->row_mask[cl->level],m);
413         q->row_mask[cl->level] &= ~m;             525         q->row_mask[cl->level] &= ~m;
414 }                                                 526 }
415                                                   527 
416 /**                                               528 /**
417  * htb_activate_prios - creates active class's    529  * htb_activate_prios - creates active class's feed chain
418  * @q: the priority event queue                << 
419  * @cl: the class to activate                  << 
420  *                                                530  *
421  * The class is connected to ancestors and/or     531  * The class is connected to ancestors and/or appropriate rows
422  * for priorities it is participating in. cl-> !! 532  * for priorities it is participating in. cl->cmode must be new
423  * (activated) mode. It does nothing if cl->pr    533  * (activated) mode. It does nothing if cl->prio_activity == 0.
424  */                                               534  */
425 static void htb_activate_prios(struct htb_sche !! 535 static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
426 {                                                 536 {
427         struct htb_class *p = cl->parent;         537         struct htb_class *p = cl->parent;
428         long m, mask = cl->prio_activity;      !! 538         long m,mask = cl->prio_activity;
                                                   >> 539         HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
                                                   >> 540         HTB_CHCL(cl);
429                                                   541 
430         while (cl->cmode == HTB_MAY_BORROW &&     542         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
431                 m = mask;                      !! 543                 HTB_CHCL(p);
432                 while (m) {                    !! 544                 m = mask; while (m) {
433                         unsigned int prio = ff !! 545                         int prio = ffz(~m);
434                                                << 
435                         if (WARN_ON_ONCE(prio  << 
436                                 break;         << 
437                         m &= ~(1 << prio);        546                         m &= ~(1 << prio);
438                                                !! 547                         
439                         if (p->inner.clprio[pr !! 548                         if (p->un.inner.feed[prio].rb_node)
440                                 /* parent alre    549                                 /* parent already has its feed in use so that
441                                  * reset bit i !! 550                                    reset bit in mask as parent is already ok */
442                                  */            << 
443                                 mask &= ~(1 <<    551                                 mask &= ~(1 << prio);
444                                                !! 552                         
445                         htb_add_to_id_tree(&p- !! 553                         htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
446                 }                                 554                 }
                                                   >> 555                 HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                                   >> 556                                 p->classid,p->prio_activity,mask,p->cmode);
447                 p->prio_activity |= mask;         557                 p->prio_activity |= mask;
448                 cl = p;                        !! 558                 cl = p; p = cl->parent;
449                 p = cl->parent;                !! 559                 HTB_CHCL(cl);
450                                                << 
451         }                                         560         }
452         if (cl->cmode == HTB_CAN_SEND && mask)    561         if (cl->cmode == HTB_CAN_SEND && mask)
453                 htb_add_class_to_row(q, cl, ma !! 562                 htb_add_class_to_row(q,cl,mask);
454 }                                                 563 }
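
htb_activate_prios() pushes the leaf's active priorities up through each HTB_MAY_BORROW ancestor, clearing a bit from mask as soon as a parent already feeds that priority so nothing higher is touched for it again; whatever bits survive to a non-borrowing ancestor land in the global row. A hedged sketch of just that mask-propagation rule; toy_node, feed_mask and may_borrow are illustrative stand-ins, not the kernel layout:

#include <stdio.h>

/* Toy node: only the state needed to show how the activation mask
 * shrinks as it climbs the tree. */
struct toy_node {
        struct toy_node *parent;
        unsigned int feed_mask;      /* prios with at least one active child */
        int may_borrow;              /* nonzero while over rate but under ceil */
};

static void toy_activate_prios(struct toy_node *cl, unsigned int mask)
{
        struct toy_node *p = cl->parent;

        while (p && cl->may_borrow && mask) {
                /* bits the parent already feeds stop propagating here */
                unsigned int new_bits = mask & ~p->feed_mask;

                p->feed_mask |= mask;   /* parent now feeds all of them */
                mask = new_bits;        /* only new bits climb further */
                cl = p;
                p = cl->parent;
        }
        if (!cl->may_borrow && mask)
                printf("add class to row with mask 0x%x\n", mask);
}

int main(void)
{
        struct toy_node root = { 0 };
        struct toy_node inner = { .parent = &root, .feed_mask = 0x1,
                                  .may_borrow = 1 };
        struct toy_node leaf = { .parent = &inner, .may_borrow = 1 };

        toy_activate_prios(&leaf, 0x3); /* prio 0 already fed: only prio 1 climbs */
        return 0;
}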
455                                                   564 
456 /**                                               565 /**
457  * htb_deactivate_prios - remove class from fe    566  * htb_deactivate_prios - remove class from feed chain
458  * @q: the priority event queue                << 
459  * @cl: the class to deactivate                << 
460  *                                                567  *
461  * cl->cmode must represent old mode (before d !! 568  * cl->cmode must represent old mode (before deactivation). It does 
462  * nothing if cl->prio_activity == 0. Class is    569  * nothing if cl->prio_activity == 0. Class is removed from all feed
463  * chains and rows.                               570  * chains and rows.
464  */                                               571  */
465 static void htb_deactivate_prios(struct htb_sc    572 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
466 {                                                 573 {
467         struct htb_class *p = cl->parent;         574         struct htb_class *p = cl->parent;
468         long m, mask = cl->prio_activity;      !! 575         long m,mask = cl->prio_activity;
                                                   >> 576         HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
                                                   >> 577         HTB_CHCL(cl);
469                                                   578 
470         while (cl->cmode == HTB_MAY_BORROW &&     579         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
471                 m = mask;                      !! 580                 m = mask; mask = 0; 
472                 mask = 0;                      << 
473                 while (m) {                       581                 while (m) {
474                         int prio = ffz(~m);       582                         int prio = ffz(~m);
475                         m &= ~(1 << prio);        583                         m &= ~(1 << prio);
476                                                !! 584                         
477                         if (p->inner.clprio[pr !! 585                         if (p->un.inner.ptr[prio] == cl->node+prio) {
478                                 /* we are remo    586                                 /* we are removing child which is pointed to from
479                                  * parent feed !! 587                                    parent feed - forget the pointer but remember
480                                  * classid     !! 588                                    classid */
481                                  */            !! 589                                 p->un.inner.last_ptr_id[prio] = cl->classid;
482                                 p->inner.clpri !! 590                                 p->un.inner.ptr[prio] = NULL;
483                                 p->inner.clpri << 
484                         }                         591                         }
485                                                !! 592                         
486                         htb_safe_rb_erase(cl-> !! 593                         htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
487                                           &p-> !! 594                         
488                                                !! 595                         if (!p->un.inner.feed[prio].rb_node) 
489                         if (!p->inner.clprio[p << 
490                                 mask |= 1 << p    596                                 mask |= 1 << prio;
491                 }                                 597                 }
492                                                !! 598                 HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                                   >> 599                                 p->classid,p->prio_activity,mask,p->cmode);
493                 p->prio_activity &= ~mask;        600                 p->prio_activity &= ~mask;
494                 cl = p;                        !! 601                 cl = p; p = cl->parent;
495                 p = cl->parent;                !! 602                 HTB_CHCL(cl);
496                                                << 
497         }                                         603         }
498         if (cl->cmode == HTB_CAN_SEND && mask) !! 604         if (cl->cmode == HTB_CAN_SEND && mask) 
499                 htb_remove_class_from_row(q, c !! 605                 htb_remove_class_from_row(q,cl,mask);
500 }                                                 606 }
501                                                   607 
502 static inline s64 htb_lowater(const struct htb << 
503 {                                              << 
504         if (htb_hysteresis)                    << 
505                 return cl->cmode != HTB_CANT_S << 
506         else                                   << 
507                 return 0;                      << 
508 }                                              << 
509 static inline s64 htb_hiwater(const struct htb << 
510 {                                              << 
511         if (htb_hysteresis)                    << 
512                 return cl->cmode == HTB_CAN_SE << 
513         else                                   << 
514                 return 0;                      << 
515 }                                              << 
516                                                << 
517                                                << 
518 /**                                               608 /**
519  * htb_class_mode - computes and returns curre    609  * htb_class_mode - computes and returns current class mode
520  * @cl: the target class                       << 
521  * @diff: diff time in microseconds            << 
522  *                                                610  *
523  * It computes cl's mode at time cl->t_c+diff     611  * It computes cl's mode at time cl->t_c+diff and returns it. If mode
524  * is not HTB_CAN_SEND then cl->pq_key is upda    612  * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
525  * from now to time when cl will change its st !! 613  * from now to time when cl will change its state. 
526  * It is also worth noting that class mode do    614  * It is also worth noting that class mode doesn't change simply
527  * at cl->{c,}tokens == 0 but there can rather !! 615  * at cl->{c,}tokens == 0 but there can rather be hysteresis of 
528  * 0 .. -cl->{c,}buffer range. It is meant to     616  * 0 .. -cl->{c,}buffer range. It is meant to limit number of
529  * mode transitions per time unit. The speed g    617  * mode transitions per time unit. The speed gain is about 1/6.
530  */                                               618  */
531 static inline enum htb_cmode                   !! 619 static __inline__ enum htb_cmode 
532 htb_class_mode(struct htb_class *cl, s64 *diff !! 620 htb_class_mode(struct htb_class *cl,long *diff)
533 {                                                 621 {
534         s64 toks;                              !! 622     long toks;
535                                                   623 
536         if ((toks = (cl->ctokens + *diff)) < h !! 624     if ((toks = (cl->ctokens + *diff)) < (
537                 *diff = -toks;                 !! 625 #if HTB_HYSTERESIS
538                 return HTB_CANT_SEND;          !! 626             cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
539         }                                      !! 627 #endif
540                                                !! 628             0)) {
541         if ((toks = (cl->tokens + *diff)) >= h !! 629             *diff = -toks;
542                 return HTB_CAN_SEND;           !! 630             return HTB_CANT_SEND;
                                                   >> 631     }
                                                   >> 632     if ((toks = (cl->tokens + *diff)) >= (
                                                   >> 633 #if HTB_HYSTERESIS
                                                   >> 634             cl->cmode == HTB_CAN_SEND ? -cl->buffer :
                                                   >> 635 #endif
                                                   >> 636             0))
                                                   >> 637             return HTB_CAN_SEND;
543                                                   638 
544         *diff = -toks;                         !! 639     *diff = -toks;
545         return HTB_MAY_BORROW;                 !! 640     return HTB_MAY_BORROW;
546 }                                                 641 }
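
Together with htb_lowater()/htb_hiwater() above, the mode computation reduces to two threshold tests on the token buckets: if the ceil bucket would fall below its low-water mark the class must be throttled; otherwise, if the rate bucket is at or above its high-water mark the class may send on its own; otherwise it may only borrow, and *diff is rewritten to the shortfall so the caller knows how long to wait. A self-contained, hedged sketch of the same decision with simplified types (toy_class, toy_class_mode and friends are illustrative names, not the kernel API):

#include <stdio.h>
#include <stdint.h>

enum toy_cmode { TOY_CANT_SEND, TOY_MAY_BORROW, TOY_CAN_SEND };

/* Simplified stand-in for struct htb_class: just the fields the
 * mode computation looks at. */
struct toy_class {
        int64_t tokens, ctokens;   /* rate and ceil bucket levels */
        int64_t buffer, cbuffer;   /* burst allowances */
        enum toy_cmode cmode;      /* current mode */
};

static const int hysteresis = 1;

static int64_t toy_lowater(const struct toy_class *cl)
{
        return (hysteresis && cl->cmode != TOY_CANT_SEND) ? -cl->cbuffer : 0;
}

static int64_t toy_hiwater(const struct toy_class *cl)
{
        return (hysteresis && cl->cmode == TOY_CAN_SEND) ? -cl->buffer : 0;
}

/* Mirror of the three-way decision in htb_class_mode(). */
static enum toy_cmode toy_class_mode(struct toy_class *cl, int64_t *diff)
{
        int64_t toks = cl->ctokens + *diff;

        if (toks < toy_lowater(cl)) {       /* ceil exhausted: throttle */
                *diff = -toks;
                return TOY_CANT_SEND;
        }
        toks = cl->tokens + *diff;
        if (toks >= toy_hiwater(cl))        /* own rate available */
                return TOY_CAN_SEND;

        *diff = -toks;                      /* rate exhausted: borrow only */
        return TOY_MAY_BORROW;
}

int main(void)
{
        struct toy_class cl = {
                .tokens = -50, .ctokens = 200,
                .buffer = 100, .cbuffer = 100,
                .cmode  = TOY_MAY_BORROW,
        };
        int64_t diff = 10;

        printf("mode=%d wait=%lld\n", toy_class_mode(&cl, &diff),
               (long long)diff);    /* rate bucket short: MAY_BORROW, wait 40 */
        return 0;
}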
547                                                   642 
548 /**                                               643 /**
549  * htb_change_class_mode - changes class's mode    644  * htb_change_class_mode - changes class's mode
550  * @q: the priority event queue                << 
551  * @cl: the target class                       << 
552  * @diff: diff time in microseconds            << 
553  *                                                645  *
554  * This should be the only way to change a cla    646  * This should be the only way to change a class's mode under normal
555  * circumstances. Routine will update feed lis !! 647  * cirsumstances. Routine will update feed lists linkage, change mode
556  * and add class to the wait event queue if ap    648  * and add class to the wait event queue if appropriate. New mode should
557  * be different from old one and cl->pq_key ha    649  * be different from old one and cl->pq_key has to be valid if changing
558  * to mode other than HTB_CAN_SEND (see htb_ad    650  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
559  */                                               651  */
560 static void                                    !! 652 static void 
561 htb_change_class_mode(struct htb_sched *q, str !! 653 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
562 {                                              !! 654 { 
563         enum htb_cmode new_mode = htb_class_mo !! 655         enum htb_cmode new_mode = htb_class_mode(cl,diff);
                                                   >> 656         
                                                   >> 657         HTB_CHCL(cl);
                                                   >> 658         HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);
564                                                   659 
565         if (new_mode == cl->cmode)                660         if (new_mode == cl->cmode)
566                 return;                        !! 661                 return; 
567                                                !! 662         
568         if (new_mode == HTB_CANT_SEND) {       !! 663         if (cl->prio_activity) { /* not neccessary: speed optimization */
569                 cl->overlimits++;              !! 664                 if (cl->cmode != HTB_CANT_SEND) 
570                 q->overlimits++;               !! 665                         htb_deactivate_prios(q,cl);
571         }                                      << 
572                                                << 
573         if (cl->prio_activity) {        /* not << 
574                 if (cl->cmode != HTB_CANT_SEND << 
575                         htb_deactivate_prios(q << 
576                 cl->cmode = new_mode;             666                 cl->cmode = new_mode;
577                 if (new_mode != HTB_CANT_SEND) !! 667                 if (new_mode != HTB_CANT_SEND) 
578                         htb_activate_prios(q,  !! 668                         htb_activate_prios(q,cl);
579         } else                                 !! 669         } else 
580                 cl->cmode = new_mode;             670                 cl->cmode = new_mode;
581 }                                                 671 }
582                                                   672 
583 /**                                               673 /**
584  * htb_activate - inserts leaf cl into appropr !! 674  * htb_activate - inserts leaf cl into appropriate active feeds 
585  * @q: the priority event queue                << 
586  * @cl: the target class                       << 
587  *                                                675  *
588  * Routine learns (new) priority of leaf and a    676  * Routine learns (new) priority of leaf and activates feed chain
589  * for the prio. It can be called on already a    677  * for the prio. It can be called on already active leaf safely.
590  * It also adds leaf into droplist.               678  * It also adds leaf into droplist.
591  */                                               679  */
592 static inline void htb_activate(struct htb_sch !! 680 static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
593 {                                                 681 {
594         WARN_ON(cl->level || !cl->leaf.q || !c !! 682         BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
595                                                !! 683         HTB_CHCL(cl);
596         if (!cl->prio_activity) {                 684         if (!cl->prio_activity) {
597                 cl->prio_activity = 1 << cl->p !! 685                 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
598                 htb_activate_prios(q, cl);     !! 686                 htb_activate_prios(q,cl);
                                                   >> 687                 list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
599         }                                         688         }
600 }                                                 689 }
601                                                   690 
602 /**                                               691 /**
603  * htb_deactivate - remove leaf cl from active !! 692  * htb_deactivate - remove leaf cl from active feeds 
604  * @q: the priority event queue                << 
605  * @cl: the target class                       << 
606  *                                                693  *
607  * Make sure that leaf is active. In other word    694  * Make sure that leaf is active. In other words it can't be called
608  * with non-active leaf. It also removes class    695  * with non-active leaf. It also removes class from the drop list.
609  */                                               696  */
610 static inline void htb_deactivate(struct htb_s !! 697 static __inline__ void 
                                                   >> 698 htb_deactivate(struct htb_sched *q,struct htb_class *cl)
611 {                                                 699 {
612         WARN_ON(!cl->prio_activity);           !! 700         BUG_TRAP(cl->prio_activity);
613                                                !! 701         HTB_CHCL(cl);
614         htb_deactivate_prios(q, cl);           !! 702         htb_deactivate_prios(q,cl);
615         cl->prio_activity = 0;                    703         cl->prio_activity = 0;
                                                   >> 704         list_del_init(&cl->un.leaf.drop_list);
616 }                                                 705 }
617                                                   706 
618 static int htb_enqueue(struct sk_buff *skb, st !! 707 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
619                        struct sk_buff **to_fre << 
620 {                                                 708 {
621         int ret;                               !! 709     struct htb_sched *q = (struct htb_sched *)sch->data;
622         unsigned int len = qdisc_pkt_len(skb); !! 710     struct htb_class *cl = htb_classify(skb,sch);
623         struct htb_sched *q = qdisc_priv(sch); !! 711 
624         struct htb_class *cl = htb_classify(sk !! 712     if (cl == HTB_DIRECT || !cl) {
625                                                !! 713         /* enqueue to helper queue */
626         if (cl == HTB_DIRECT) {                !! 714         if (q->direct_queue.qlen < q->direct_qlen && cl) {
627                 /* enqueue to helper queue */  !! 715             __skb_queue_tail(&q->direct_queue, skb);
628                 if (q->direct_queue.qlen < q-> !! 716             q->direct_pkts++;
629                         __qdisc_enqueue_tail(s !! 717         } else {
630                         q->direct_pkts++;      !! 718             kfree_skb (skb);
631                 } else {                       !! 719             sch->stats.drops++;
632                         return qdisc_drop(skb, !! 720             return NET_XMIT_DROP;
633                 }                              !! 721         }
634 #ifdef CONFIG_NET_CLS_ACT                      !! 722     } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
635         } else if (!cl) {                      !! 723         sch->stats.drops++;
636                 if (ret & __NET_XMIT_BYPASS)   !! 724         cl->stats.drops++;
637                         qdisc_qstats_drop(sch) !! 725         return NET_XMIT_DROP;
638                 __qdisc_drop(skb, to_free);    !! 726     } else {
639                 return ret;                    !! 727         cl->stats.packets++; cl->stats.bytes += skb->len;
640 #endif                                         !! 728         htb_activate (q,cl);
641         } else if ((ret = qdisc_enqueue(skb, c !! 729     }
642                                         to_fre !! 730 
643                 if (net_xmit_drop_count(ret))  !! 731     sch->q.qlen++;
644                         qdisc_qstats_drop(sch) !! 732     sch->stats.packets++; sch->stats.bytes += skb->len;
645                         cl->drops++;           !! 733     HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
646                 }                              !! 734     return NET_XMIT_SUCCESS;
647                 return ret;                    !! 735 }
                                                   >> 736 
                                                   >> 737 /* TODO: requeuing packet charges it to policers again !! */
                                                   >> 738 static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                                                   >> 739 {
                                                   >> 740     struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 741     struct htb_class *cl = htb_classify(skb,sch);
                                                   >> 742     struct sk_buff *tskb;
                                                   >> 743 
                                                   >> 744     if (cl == HTB_DIRECT || !cl) {
                                                   >> 745         /* enqueue to helper queue */
                                                   >> 746         if (q->direct_queue.qlen < q->direct_qlen && cl) {
                                                   >> 747             __skb_queue_head(&q->direct_queue, skb);
648         } else {                                  748         } else {
649                 htb_activate(q, cl);           !! 749             __skb_queue_head(&q->direct_queue, skb);
                                                   >> 750             tskb = __skb_dequeue_tail(&q->direct_queue);
                                                   >> 751             kfree_skb (tskb);
                                                   >> 752             sch->stats.drops++;
                                                   >> 753             return NET_XMIT_CN; 
                                                   >> 754         }
                                                   >> 755     } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
                                                   >> 756         sch->stats.drops++;
                                                   >> 757         cl->stats.drops++;
                                                   >> 758         return NET_XMIT_DROP;
                                                   >> 759     } else 
                                                   >> 760             htb_activate (q,cl);
                                                   >> 761 
                                                   >> 762     sch->q.qlen++;
                                                   >> 763     HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
                                                   >> 764     return NET_XMIT_SUCCESS;
                                                   >> 765 }
                                                   >> 766 
                                                   >> 767 static void htb_timer(unsigned long arg)
                                                   >> 768 {
                                                   >> 769     struct Qdisc *sch = (struct Qdisc*)arg;
                                                   >> 770     sch->flags &= ~TCQ_F_THROTTLED;
                                                   >> 771     wmb();
                                                   >> 772     netif_schedule(sch->dev);
                                                   >> 773 }
                                                   >> 774 
                                                   >> 775 #ifdef HTB_RATECM
                                                   >> 776 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
                                                   >> 777 static void htb_rate_timer(unsigned long arg)
                                                   >> 778 {
                                                   >> 779         struct Qdisc *sch = (struct Qdisc*)arg;
                                                   >> 780         struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 781         struct list_head *p;
                                                   >> 782 
                                                   >> 783         /* lock queue so that we can muck with it */
                                                   >> 784         HTB_QLOCK(sch);
                                                   >> 785         HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);
                                                   >> 786 
                                                   >> 787         q->rttim.expires = jiffies + HZ;
                                                   >> 788         add_timer(&q->rttim);
                                                   >> 789 
                                                   >> 790         /* scan and recompute one bucket at time */
                                                   >> 791         if (++q->recmp_bucket >= HTB_HSIZE) 
                                                   >> 792                 q->recmp_bucket = 0;
                                                   >> 793         list_for_each (p,q->hash+q->recmp_bucket) {
                                                   >> 794                 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                                                   >> 795                 HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
                                                   >> 796                                 cl->classid,cl->sum_bytes,cl->sum_packets);
                                                   >> 797                 RT_GEN (cl->sum_bytes,cl->rate_bytes);
                                                   >> 798                 RT_GEN (cl->sum_packets,cl->rate_packets);
650         }                                         799         }
651                                                !! 800         HTB_QUNLOCK(sch);
652         sch->qstats.backlog += len;            << 
653         sch->q.qlen++;                         << 
654         return NET_XMIT_SUCCESS;               << 
655 }                                              << 
656                                                << 
657 static inline void htb_accnt_tokens(struct htb << 
658 {                                              << 
659         s64 toks = diff + cl->tokens;          << 
660                                                << 
661         if (toks > cl->buffer)                 << 
662                 toks = cl->buffer;             << 
663         toks -= (s64) psched_l2t_ns(&cl->rate, << 
664         if (toks <= -cl->mbuffer)              << 
665                 toks = 1 - cl->mbuffer;        << 
666                                                << 
667         cl->tokens = toks;                     << 
668 }                                              << 
669                                                << 
670 static inline void htb_accnt_ctokens(struct ht << 
671 {                                              << 
672         s64 toks = diff + cl->ctokens;         << 
673                                                << 
674         if (toks > cl->cbuffer)                << 
675                 toks = cl->cbuffer;            << 
676         toks -= (s64) psched_l2t_ns(&cl->ceil, << 
677         if (toks <= -cl->mbuffer)              << 
678                 toks = 1 - cl->mbuffer;        << 
679                                                << 
680         cl->ctokens = toks;                    << 
681 }                                                 801 }
                                                   >> 802 #endif
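
htb_accnt_tokens()/htb_accnt_ctokens() above are a plain token-bucket update: credit the elapsed time, cap at the configured burst, charge the packet's transmission time at the configured rate, and floor the result one unit above -mbuffer. A hedged, self-contained sketch with a simple linear conversion standing in for psched_l2t_ns() (all names and units here are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: 'rate_Bps' is bytes per second and time is kept
 * in nanoseconds, a simplification of the kernel's psched_l2t_ns(). */
static int64_t toy_l2t_ns(uint64_t rate_Bps, unsigned int bytes)
{
        return (int64_t)(bytes * 1000000000ULL / rate_Bps);
}

/* One bucket update: credit elapsed time, cap at the burst, charge the
 * packet, and floor the result one unit above -mbuffer. */
static int64_t toy_account(int64_t toks, int64_t diff, int64_t buffer,
                           int64_t mbuffer, uint64_t rate_Bps,
                           unsigned int bytes)
{
        toks += diff;
        if (toks > buffer)
                toks = buffer;
        toks -= toy_l2t_ns(rate_Bps, bytes);
        if (toks <= -mbuffer)
                toks = 1 - mbuffer;
        return toks;
}

int main(void)
{
        /* 125000 B/s (1 Mbit/s), 10 ms burst, 60 s mbuffer, 1500 B packet */
        int64_t toks = toy_account(0, 20000000, 10000000, 60000000000LL,
                                   125000, 1500);

        printf("tokens after packet: %lld ns\n", (long long)toks);
        return 0;
}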
682                                                   803 
683 /**                                               804 /**
684  * htb_charge_class - charges amount "bytes" t !! 805  * htb_charge_class - charges ammount "bytes" to leaf and ancestors
685  * @q: the priority event queue                << 
686  * @cl: the class to start iterate             << 
687  * @level: the minimum level to account        << 
688  * @skb: the socket buffer                     << 
689  *                                                806  *
690  * Routine assumes that packet "bytes" long wa    807  * Routine assumes that packet "bytes" long was dequeued from leaf cl
691  * borrowing from "level". It accounts bytes t    808  * borrowing from "level". It accounts bytes to ceil leaky bucket for
692  * leaf and all ancestors and to rate bucket f    809  * leaf and all ancestors and to rate bucket for ancestors at levels
693  * "level" and higher. It also handles possibl    810  * "level" and higher. It also handles possible change of mode resulting
694  * from the update. Note that mode can also in    811  * from the update. Note that mode can also increase here (MAY_BORROW to
695  * CAN_SEND) because we can use a more precise     812  * CAN_SEND) because we can use a more precise clock than the event queue here.
696  * In such case we remove class from event que    813  * In such case we remove class from event queue first.
697  */                                               814  */
698 static void htb_charge_class(struct htb_sched  !! 815 static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
699                              int level, struct !! 816                 int level,int bytes)
700 {                                              !! 817 {       
701         int bytes = qdisc_pkt_len(skb);        !! 818         long toks,diff;
702         enum htb_cmode old_mode;                  819         enum htb_cmode old_mode;
703         s64 diff;                              !! 820         HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);
                                                   >> 821 
                                                   >> 822 #define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
                                                   >> 823         if (toks > cl->B) toks = cl->B; \
                                                   >> 824         toks -= L2T(cl, cl->R, bytes); \
                                                   >> 825         if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
                                                   >> 826         cl->T = toks
704                                                   827 
705         while (cl) {                              828         while (cl) {
706                 diff = min_t(s64, q->now - cl- !! 829                 HTB_CHCL(cl);
                                                   >> 830                 diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
                                                   >> 831 #ifdef HTB_DEBUG
                                                   >> 832                 if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                                                   >> 833                         if (net_ratelimit())
                                                   >> 834                                 printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                                   >> 835                                        cl->classid, diff,
                                                   >> 836                                        (unsigned long long) q->now,
                                                   >> 837                                        (unsigned long long) cl->t_c,
                                                   >> 838                                        q->jiffies);
                                                   >> 839                         diff = 1000;
                                                   >> 840                 }
                                                   >> 841 #endif
707                 if (cl->level >= level) {         842                 if (cl->level >= level) {
708                         if (cl->level == level !! 843                         if (cl->level == level) cl->xstats.lends++;
709                                 cl->xstats.len !! 844                         HTB_ACCNT (tokens,buffer,rate);
710                         htb_accnt_tokens(cl, b << 
711                 } else {                          845                 } else {
712                         cl->xstats.borrows++;     846                         cl->xstats.borrows++;
713                         cl->tokens += diff;    !! 847                         cl->tokens += diff; /* we moved t_c; update tokens */
714                 }                                 848                 }
715                 htb_accnt_ctokens(cl, bytes, d !! 849                 HTB_ACCNT (ctokens,cbuffer,ceil);
716                 cl->t_c = q->now;                 850                 cl->t_c = q->now;
                                                   >> 851                 HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);
717                                                   852 
718                 old_mode = cl->cmode;          !! 853                 old_mode = cl->cmode; diff = 0;
719                 diff = 0;                      !! 854                 htb_change_class_mode(q,cl,&diff);
720                 htb_change_class_mode(q, cl, & << 
721                 if (old_mode != cl->cmode) {      855                 if (old_mode != cl->cmode) {
722                         if (old_mode != HTB_CA    856                         if (old_mode != HTB_CAN_SEND)
723                                 htb_safe_rb_er !! 857                                 htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
724                         if (cl->cmode != HTB_C    858                         if (cl->cmode != HTB_CAN_SEND)
725                                 htb_add_to_wai !! 859                                 htb_add_to_wait_tree (q,cl,diff,1);
726                 }                                 860                 }
                                                   >> 861                 
                                                   >> 862 #ifdef HTB_RATECM
                                                   >> 863                 /* update rate counters */
                                                   >> 864                 cl->sum_bytes += bytes; cl->sum_packets++;
                                                   >> 865 #endif
727                                                   866 
728                 /* update basic stats except f !! 867                 /* update byte stats except for leaves which are already updated */
729                 if (cl->level)                 !! 868                 if (cl->level) {
730                         bstats_update(&cl->bst !! 869                         cl->stats.bytes += bytes;
731                                                !! 870                         cl->stats.packets++;
                                                   >> 871                 }
732                 cl = cl->parent;                  872                 cl = cl->parent;
733         }                                         873         }
734 }                                                 874 }
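
The charge loop walks from the leaf to the root: every class on the path pays the packet out of its ceil bucket, but only classes at or above the level the packet borrowed from pay out of their rate bucket; classes below that level merely keep their rate tokens in step with the moved timestamp. A hedged sketch of just that level test on a parent chain, without the buffer/mbuffer clamping (toy_class and toy_charge are illustrative):

#include <stdio.h>

struct toy_class {
        int level;                  /* 0 = leaf, increases towards the root */
        long tokens, ctokens;
        struct toy_class *parent;
};

/* Charge 'cost' (transmission time) for a packet that borrowed at
 * 'borrow_level'; 'credit' is the time elapsed since the last update. */
static void toy_charge(struct toy_class *cl, int borrow_level,
                       long cost, long credit)
{
        for (; cl; cl = cl->parent) {
                if (cl->level >= borrow_level)
                        cl->tokens += credit - cost;   /* rate bucket pays */
                else
                        cl->tokens += credit;          /* only moved in time */
                cl->ctokens += credit - cost;          /* ceil always pays */
        }
}

int main(void)
{
        struct toy_class root  = { 2, 0, 0, NULL };
        struct toy_class inner = { 1, 0, 0, &root };
        struct toy_class leaf  = { 0, 0, 0, &inner };

        toy_charge(&leaf, 1, 12, 5);   /* packet borrowed from level 1 */
        printf("leaf %ld/%ld inner %ld/%ld root %ld/%ld\n",
               leaf.tokens, leaf.ctokens, inner.tokens, inner.ctokens,
               root.tokens, root.ctokens);
        return 0;
}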
735                                                   875 
736 /**                                               876 /**
737  * htb_do_events - make mode changes to classe    877  * htb_do_events - make mode changes to classes at the level
738  * @q: the priority event queue                << 
739  * @level: which wait_pq in 'q->hlevel'        << 
740  * @start: start jiffies                       << 
741  *                                                878  *
742  * Scans event queue for pending events and ap !! 879  * Scans event queue for pending events and applies them. Returns jiffies to
743  * next pending event (0 for no event in pq, q !! 880  * next pending event (0 for no event in pq).
744  * Note: Applied are events which have cl->pq_ !! 881  * Note: Aplied are events which have cl->pq_key <= jiffies.
745  */                                               882  */
746 static s64 htb_do_events(struct htb_sched *q,  !! 883 static long htb_do_events(struct htb_sched *q,int level)
747                          unsigned long start)  << 
748 {                                                 884 {
749         /* don't run for longer than 2 jiffies !! 885         int i;
750          * 1 to simplify things when jiffy is  !! 886         HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
751          * too soon                            !! 887                         level,q->wait_pq[level].rb_node,q->row_mask[level]);
752          */                                    !! 888         for (i = 0; i < 500; i++) {
753         unsigned long stop_at = start + 2;     << 
754         struct rb_root *wait_pq = &q->hlevel[l << 
755                                                << 
756         while (time_before(jiffies, stop_at))  << 
757                 struct htb_class *cl;             889                 struct htb_class *cl;
758                 s64 diff;                      !! 890                 long diff;
759                 struct rb_node *p = rb_first(w !! 891                 rb_node_t *p = q->wait_pq[level].rb_node;
760                                                !! 892                 if (!p) return 0;
761                 if (!p)                        !! 893                 while (p->rb_left) p = p->rb_left;
762                         return 0;              << 
763                                                   894 
764                 cl = rb_entry(p, struct htb_cl    895                 cl = rb_entry(p, struct htb_class, pq_node);
765                 if (cl->pq_key > q->now)       !! 896                 if (time_after(cl->pq_key, q->jiffies)) {
766                         return cl->pq_key;     !! 897                         HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
767                                                !! 898                         return cl->pq_key - q->jiffies;
768                 htb_safe_rb_erase(p, wait_pq); !! 899                 }
769                 diff = min_t(s64, q->now - cl- !! 900                 htb_safe_rb_erase(p,q->wait_pq+level);
770                 htb_change_class_mode(q, cl, & !! 901                 diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
                                                   >> 902 #ifdef HTB_DEBUG
                                                   >> 903                 if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                                                   >> 904                         if (net_ratelimit())
                                                   >> 905                                 printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                                   >> 906                                        cl->classid, diff,
                                                   >> 907                                        (unsigned long long) q->now,
                                                   >> 908                                        (unsigned long long) cl->t_c,
                                                   >> 909                                        q->jiffies);
                                                   >> 910                         diff = 1000;
                                                   >> 911                 }
                                                   >> 912 #endif
                                                   >> 913                 htb_change_class_mode(q,cl,&diff);
771                 if (cl->cmode != HTB_CAN_SEND)    914                 if (cl->cmode != HTB_CAN_SEND)
772                         htb_add_to_wait_tree(q !! 915                         htb_add_to_wait_tree (q,cl,diff,2);
773         }                                         916         }
774                                                !! 917         if (net_ratelimit())
775         /* too much load - let's continue afte !! 918                 printk(KERN_WARNING "htb: too many events !\n");
776         if (!(q->warned & HTB_WARN_TOOMANYEVEN !! 919         return HZ/10;
777                 pr_warn("htb: too many events! << 
778                 q->warned |= HTB_WARN_TOOMANYE << 
779         }                                      << 
780                                                << 
781         return q->now;                         << 
782 }                                                 920 }
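
htb_do_events() pops wait-queue entries in pq_key order, re-evaluating each class's mode, and stops either when the head of the queue lies in the future (returning that key as the next wakeup time) or when the roughly-two-jiffies work budget is exhausted (returning q->now so processing resumes immediately). A hedged sketch of the same pattern; toy_event and toy_do_events are illustrative, and a sorted singly linked list stands in for the rbtree:

#include <stdio.h>
#include <stdint.h>

/* Toy event with an absolute expiry key, kept sorted by key
 * (standing in for the rbtree-ordered wait_pq). */
struct toy_event {
        int64_t pq_key;
        struct toy_event *next;
};

/* Process due events; return 0 if the queue emptied, otherwise the
 * key of the next pending event, or 'now' if we hit the work budget. */
static int64_t toy_do_events(struct toy_event **pq, int64_t now, int budget)
{
        while (budget--) {
                struct toy_event *ev = *pq;

                if (!ev)
                        return 0;               /* nothing pending */
                if (ev->pq_key > now)
                        return ev->pq_key;      /* next wakeup time */

                *pq = ev->next;                 /* apply + remove the event */
                printf("event due at %lld handled\n", (long long)ev->pq_key);
        }
        return now;                             /* too much work: resume at once */
}

int main(void)
{
        struct toy_event e3 = { 30, NULL }, e2 = { 12, &e3 }, e1 = { 5, &e2 };
        struct toy_event *pq = &e1;

        printf("next event: %lld\n", (long long)toy_do_events(&pq, 20, 16));
        return 0;
}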
783                                                   921 
784 /* Returns class->node+prio from id-tree where    922 /* Returns class->node+prio from id-tree where class's id is >= id. NULL
785  * if no such one exists.                      !! 923    if no such one exists. */
786  */                                            !! 924 static rb_node_t *
787 static struct rb_node *htb_id_find_next_upper( !! 925 htb_id_find_next_upper(int prio,rb_node_t *n,u32 id)
788                                                << 
789 {                                                 926 {
790         struct rb_node *r = NULL;              !! 927         rb_node_t *r = NULL;
791         while (n) {                               928         while (n) {
792                 struct htb_class *cl =         !! 929                 struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
793                     rb_entry(n, struct htb_cla !! 930                 if (id == cl->classid) return n;
794                                                !! 931                 
795                 if (id > cl->common.classid) { !! 932                 if (id > cl->classid) {
796                         n = n->rb_right;          933                         n = n->rb_right;
797                 } else if (id < cl->common.cla !! 934                 } else {
798                         r = n;                    935                         r = n;
799                         n = n->rb_left;           936                         n = n->rb_left;
800                 } else {                       << 
801                         return n;              << 
802                 }                                 937                 }
803         }                                         938         }
804         return r;                                 939         return r;
805 }                                                 940 }
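
htb_id_find_next_upper() is the standard "smallest key >= id" search in a binary search tree: descend from the root, remembering the last node whose key was not below id. A self-contained sketch over a plain BST keyed by classid (toy_node and toy_find_next_upper are illustrative, not the kernel rbtree API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_node {
        uint32_t classid;
        struct toy_node *left, *right;
};

/* Return the node with the smallest classid >= id, or NULL if none. */
static struct toy_node *toy_find_next_upper(struct toy_node *n, uint32_t id)
{
        struct toy_node *r = NULL;

        while (n) {
                if (id > n->classid) {
                        n = n->right;           /* everything here is too small */
                } else if (id < n->classid) {
                        r = n;                  /* candidate; try to do better */
                        n = n->left;
                } else {
                        return n;               /* exact match */
                }
        }
        return r;
}

int main(void)
{
        struct toy_node a = { 10, NULL, NULL }, c = { 30, NULL, NULL };
        struct toy_node b = { 20, &a, &c };     /* valid BST rooted at b */

        struct toy_node *hit = toy_find_next_upper(&b, 15);
        printf("next >= 15 is %u\n", hit ? hit->classid : 0);  /* prints 20 */
        return 0;
}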
806                                                   941 
807 /**                                               942 /**
808  * htb_lookup_leaf - returns next leaf class i    943  * htb_lookup_leaf - returns next leaf class in DRR order
809  * @hprio: the current one                     << 
810  * @prio: which prio in class                  << 
811  *                                                944  *
812  * Find leaf where current feed pointers point     945  * Find leaf where current feed pointers point to.
813  */                                               946  */
814 static struct htb_class *htb_lookup_leaf(struc !! 947 static struct htb_class *
                                                   >> 948 htb_lookup_leaf(HTB_ARGQ rb_root_t *tree,int prio,rb_node_t **pptr,u32 *pid)
815 {                                                 949 {
816         int i;                                    950         int i;
817         struct {                                  951         struct {
818                 struct rb_node *root;          !! 952                 rb_node_t *root;
819                 struct rb_node **pptr;         !! 953                 rb_node_t **pptr;
820                 u32 *pid;                         954                 u32 *pid;
821         } stk[TC_HTB_MAXDEPTH], *sp = stk;     !! 955         } stk[TC_HTB_MAXDEPTH],*sp = stk;
822                                                !! 956         
823         BUG_ON(!hprio->row.rb_node);           !! 957         BUG_TRAP(tree->rb_node);
824         sp->root = hprio->row.rb_node;         !! 958         sp->root = tree->rb_node;
825         sp->pptr = &hprio->ptr;                !! 959         sp->pptr = pptr;
826         sp->pid = &hprio->last_ptr_id;         !! 960         sp->pid = pid;
827                                                   961 
828         for (i = 0; i < 65535; i++) {             962         for (i = 0; i < 65535; i++) {
829                 if (!*sp->pptr && *sp->pid) {  !! 963                 HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
830                         /* ptr was invalidated !! 964 
831                          * the original or nex !! 965                 if (!*sp->pptr && *sp->pid) { 
832                          */                    !! 966                         /* ptr was invalidated but id is valid - try to recover 
833                         *sp->pptr =            !! 967                            the original or next ptr */
834                             htb_id_find_next_u !! 968                         *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
835                 }                              !! 969                 }
836                 *sp->pid = 0;   /* ptr is vali !! 970                 *sp->pid = 0; /* ptr is valid now so remove this hint as it
837                                  * can become  !! 971                                  can become out of date quickly */
838                                  */            !! 972                 if (!*sp->pptr) { /* we are at right end; rewind & go up */
839                 if (!*sp->pptr) {       /* we  << 
840                         *sp->pptr = sp->root;     973                         *sp->pptr = sp->root;
841                         while ((*sp->pptr)->rb !! 974                         while ((*sp->pptr)->rb_left) 
842                                 *sp->pptr = (*    975                                 *sp->pptr = (*sp->pptr)->rb_left;
843                         if (sp > stk) {           976                         if (sp > stk) {
844                                 sp--;             977                                 sp--;
845                                 if (!*sp->pptr !! 978                                 BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
846                                         WARN_O !! 979                                 htb_next_rb_node (sp->pptr);
847                                         return << 
848                                 }              << 
849                                 htb_next_rb_no << 
850                         }                         980                         }
851                 } else {                          981                 } else {
852                         struct htb_class *cl;     982                         struct htb_class *cl;
853                         struct htb_prio *clp;  !! 983                         cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
854                                                !! 984                         HTB_CHCL(cl);
855                         cl = rb_entry(*sp->ppt !! 985                         if (!cl->level) 
856                         if (!cl->level)        << 
857                                 return cl;        986                                 return cl;
858                         clp = &cl->inner.clpri !! 987                         (++sp)->root = cl->un.inner.feed[prio].rb_node;
859                         (++sp)->root = clp->fe !! 988                         sp->pptr = cl->un.inner.ptr+prio;
860                         sp->pptr = &clp->ptr;  !! 989                         sp->pid = cl->un.inner.last_ptr_id+prio;
861                         sp->pid = &clp->last_p << 
862                 }                                 990                 }
863         }                                         991         }
864         WARN_ON(1);                            !! 992         BUG_TRAP(0);
865         return NULL;                              993         return NULL;
866 }                                                 994 }
867                                                   995 
868 /* dequeues packet at given priority and level    996 /* dequeues packet at given priority and level; call only if
869  * you are sure that there is active class at  !! 997    you are sure that there is active class at prio/level */
870  */                                            !! 998 static struct sk_buff *
871 static struct sk_buff *htb_dequeue_tree(struct !! 999 htb_dequeue_tree(struct htb_sched *q,int prio,int level)
872                                         const  << 
873 {                                                 1000 {
874         struct sk_buff *skb = NULL;               1001         struct sk_buff *skb = NULL;
875         struct htb_class *cl, *start;          !! 1002         struct htb_class *cl,*start;
876         struct htb_level *hlevel = &q->hlevel[ << 
877         struct htb_prio *hprio = &hlevel->hpri << 
878                                                << 
879         /* look initial class up in the row */    1003         /* look initial class up in the row */
880         start = cl = htb_lookup_leaf(hprio, pr !! 1004         start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
881                                                !! 1005                         q->ptr[level]+prio,q->last_ptr_id[level]+prio);
                                                   >> 1006         
882         do {                                      1007         do {
883 next:                                             1008 next:
884                 if (unlikely(!cl))             !! 1009                 BUG_TRAP(cl); 
885                         return NULL;           !! 1010                 if (!cl) return NULL;
                                                   >> 1011                 HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                                   >> 1012                                 prio,level,cl->classid,cl->un.leaf.deficit[level]);
886                                                   1013 
887                 /* class can be empty - it is     1014                 /* class can be empty - it is unlikely but can be true if leaf
888                  * qdisc drops packets in enqu !! 1015                    qdisc drops packets in enqueue routine or if someone used
889                  * graft operation on the leaf !! 1016                    graft operation on the leaf since last dequeue; 
890                  * simply deactivate and skip  !! 1017                    simply deactivate and skip such class */
891                  */                            !! 1018                 if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
892                 if (unlikely(cl->leaf.q->q.qle << 
893                         struct htb_class *next    1019                         struct htb_class *next;
894                         htb_deactivate(q, cl); !! 1020                         htb_deactivate(q,cl);
895                                                   1021 
896                         /* row/level might bec    1022                         /* row/level might become empty */
897                         if ((q->row_mask[level    1023                         if ((q->row_mask[level] & (1 << prio)) == 0)
898                                 return NULL;   !! 1024                                 return NULL; 
899                                                !! 1025                         
900                         next = htb_lookup_leaf !! 1026                         next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
901                                                !! 1027                                         prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
902                         if (cl == start)       !! 1028                         if (cl == start) /* fix start if we just deleted it */
903                                 start = next;     1029                                 start = next;
904                         cl = next;                1030                         cl = next;
905                         goto next;                1031                         goto next;
906                 }                                 1032                 }
907                                                !! 1033         
908                 skb = cl->leaf.q->dequeue(cl-> !! 1034                 if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL)) 
909                 if (likely(skb != NULL))       << 
910                         break;                    1035                         break;
911                                                !! 1036                 if (!cl->warned) {
912                 qdisc_warn_nonwc("htb", cl->le !! 1037                         printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
913                 htb_next_rb_node(level ? &cl-> !! 1038                         cl->warned = 1;
914                                          &q->h !! 1039                 }
915                 cl = htb_lookup_leaf(hprio, pr !! 1040                 q->nwc_hit++;
916                                                !! 1041                 htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                                                   >> 1042                 cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
                                                   >> 1043                                 q->last_ptr_id[level]+prio);
917         } while (cl != start);                    1044         } while (cl != start);
918                                                   1045 
919         if (likely(skb != NULL)) {                1046         if (likely(skb != NULL)) {
920                 bstats_update(&cl->bstats, skb !! 1047                 if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
921                 cl->leaf.deficit[level] -= qdi !! 1048                         HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
922                 if (cl->leaf.deficit[level] <  !! 1049                                 level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
923                         cl->leaf.deficit[level !! 1050                         cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
924                         htb_next_rb_node(level !! 1051                         htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
925                                                << 
926                 }                                 1052                 }
927                 /* this used to be after charg    1053         /* this used to be after charge_class but this constellation
928                  * gives us slightly better pe !! 1054                    gives us slightly better performance */
929                  */                            !! 1055                 if (!cl->un.leaf.q->q.qlen)
930                 if (!cl->leaf.q->q.qlen)       !! 1056                         htb_deactivate (q,cl);
931                         htb_deactivate(q, cl); !! 1057                 htb_charge_class (q,cl,level,skb->len);
932                 htb_charge_class(q, cl, level, << 
933         }                                         1058         }
934         return skb;                               1059         return skb;
935 }                                                 1060 }
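
The tail of htb_dequeue_tree() above is classic deficit round robin: the dequeued packet's length is charged against the leaf's per-level deficit, and once the deficit goes negative it is topped up by the class quantum and the scan advances to the next leaf. A minimal standalone sketch of that bookkeeping (plain arrays stand in for the kernel's rb-trees; all sizes and names below are illustrative):

/* Deficit round robin as used at the end of htb_dequeue_tree():
 * each leaf pays for dequeued bytes out of its deficit; when the deficit
 * goes negative it is refilled by the quantum and the scan moves on. */
#include <stdio.h>

#define NLEAVES 3

static int backlog[NLEAVES] = { 3000, 1500, 4500 };     /* queued bytes per leaf */

/* take the next packet (at most 1500 bytes) from leaf i; 0 means empty */
static int next_pkt_len(int i)
{
        int len = backlog[i] >= 1500 ? 1500 : backlog[i];

        backlog[i] -= len;
        return len;
}

int main(void)
{
        struct { int deficit, quantum; } leaves[NLEAVES] = {
                { 1500, 1500 },
                { 1500, 1500 },
                { 1500, 3000 },         /* this leaf gets twice the share */
        };
        int i = 0, sent = 0;

        while (sent < 6) {              /* 6 packets are queued in total */
                int len = next_pkt_len(i);

                if (!len) {             /* empty leaf: skip it (cf. htb_deactivate()) */
                        i = (i + 1) % NLEAVES;
                        continue;
                }
                printf("leaf %d sends %d bytes\n", i, len);
                sent++;
                leaves[i].deficit -= len;               /* charge the packet */
                if (leaves[i].deficit < 0) {
                        leaves[i].deficit += leaves[i].quantum; /* top up */
                        i = (i + 1) % NLEAVES;          /* cf. htb_next_rb_node() */
                }
        }
        return 0;
}
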
936                                                   1061 
                                                   >> 1062 static void htb_delay_by(struct Qdisc *sch,long delay)
                                                   >> 1063 {
                                                   >> 1064         struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 1065         if (delay <= 0) delay = 1;
                                                   >> 1066         if (unlikely(delay > 5*HZ)) {
                                                   >> 1067                 if (net_ratelimit())
                                                   >> 1068                         printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
                                                   >> 1069                 delay = 5*HZ;
                                                   >> 1070         }
                                                   >> 1071         /* why not use jiffies here? because expires can be in the past */
                                                   >> 1072         mod_timer(&q->timer, q->jiffies + delay);
                                                   >> 1073         sch->flags |= TCQ_F_THROTTLED;
                                                   >> 1074         sch->stats.overlimits++;
                                                   >> 1075         HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
                                                   >> 1076 }
                                                   >> 1077 
937 static struct sk_buff *htb_dequeue(struct Qdis    1078 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
938 {                                                 1079 {
939         struct sk_buff *skb;                   !! 1080         struct sk_buff *skb = NULL;
940         struct htb_sched *q = qdisc_priv(sch); !! 1081         struct htb_sched *q = (struct htb_sched *)sch->data;
941         int level;                                1082         int level;
942         s64 next_event;                        !! 1083         long min_delay;
943         unsigned long start_at;                !! 1084 #ifdef HTB_DEBUG
                                                   >> 1085         int evs_used = 0;
                                                   >> 1086 #endif
                                                   >> 1087 
                                                   >> 1088         q->jiffies = jiffies;
                                                   >> 1089         HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                                                   >> 1090                         sch->q.qlen);
944                                                   1091 
945         /* try to dequeue direct packets as hi    1092         /* try to dequeue direct packets as high prio (!) to minimize cpu work */
946         skb = __qdisc_dequeue_head(&q->direct_ !! 1093         if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
947         if (skb != NULL) {                     !! 1094                 sch->flags &= ~TCQ_F_THROTTLED;
948 ok:                                            << 
949                 qdisc_bstats_update(sch, skb); << 
950                 qdisc_qstats_backlog_dec(sch,  << 
951                 sch->q.qlen--;                    1095                 sch->q.qlen--;
952                 return skb;                       1096                 return skb;
953         }                                         1097         }
954                                                   1098 
955         if (!sch->q.qlen)                      !! 1099         if (!sch->q.qlen) goto fin;
956                 goto fin;                      !! 1100         PSCHED_GET_TIME(q->now);
957         q->now = ktime_get_ns();               << 
958         start_at = jiffies;                    << 
959                                                << 
960         next_event = q->now + 5LLU * NSEC_PER_ << 
961                                                   1101 
                                                   >> 1102         min_delay = LONG_MAX;
                                                   >> 1103         q->nwc_hit = 0;
962         for (level = 0; level < TC_HTB_MAXDEPT    1104         for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
963                 /* common case optimization -     1105                 /* common case optimization - skip event handler quickly */
964                 int m;                            1106                 int m;
965                 s64 event = q->near_ev_cache[l !! 1107                 long delay;
966                                                !! 1108                 if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
967                 if (q->now >= event) {         !! 1109                         delay = htb_do_events(q,level);
968                         event = htb_do_events( !! 1110                         q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
969                         if (!event)            !! 1111 #ifdef HTB_DEBUG
970                                 event = q->now !! 1112                         evs_used++;
971                         q->near_ev_cache[level !! 1113 #endif
972                 }                              !! 1114                 } else
973                                                !! 1115                         delay = q->near_ev_cache[level] - q->jiffies;   
974                 if (next_event > event)        !! 1116                 
975                         next_event = event;    !! 1117                 if (delay && min_delay > delay) 
976                                                !! 1118                         min_delay = delay;
977                 m = ~q->row_mask[level];          1119                 m = ~q->row_mask[level];
978                 while (m != (int)(-1)) {          1120                 while (m != (int)(-1)) {
979                         int prio = ffz(m);     !! 1121                         int prio = ffz (m);
980                                                << 
981                         m |= 1 << prio;           1122                         m |= 1 << prio;
982                         skb = htb_dequeue_tree !! 1123                         skb = htb_dequeue_tree(q,prio,level);
983                         if (likely(skb != NULL !! 1124                         if (likely(skb != NULL)) {
984                                 goto ok;       !! 1125                                 sch->q.qlen--;
                                                   >> 1126                                 sch->flags &= ~TCQ_F_THROTTLED;
                                                   >> 1127                                 goto fin;
                                                   >> 1128                         }
985                 }                                 1129                 }
986         }                                         1130         }
987         if (likely(next_event > q->now))       !! 1131 #ifdef HTB_DEBUG
988                 qdisc_watchdog_schedule_ns(&q- !! 1132         if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
989         else                                   !! 1133                 if (min_delay == LONG_MAX) {
990                 schedule_work(&q->work);       !! 1134                         printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
                                                   >> 1135                                         evs_used,q->jiffies,jiffies);
                                                   >> 1136                         htb_debug_dump(q);
                                                   >> 1137                 } else 
                                                   >> 1138                         printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
                                                   >> 1139                                         "too small rate\n",min_delay);
                                                   >> 1140         }
                                                   >> 1141 #endif
                                                   >> 1142         htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
991 fin:                                              1143 fin:
                                                   >> 1144         HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
992         return skb;                               1145         return skb;
993 }                                                 1146 }
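
htb_dequeue() above caps its sleep horizon at five seconds, lowers it to the earliest per-level event cached in near_ev_cache[], and then either arms the qdisc watchdog for that time or reschedules via the work item when the event is already due. A small standalone model of that selection (timestamps are made up; arm_watchdog()/reschedule_now() are stand-ins):

/* Next-event selection as in htb_dequeue(): start from a 5 s horizon,
 * lower it to the earliest cached per-level event, then arm a watchdog
 * for that time or reschedule at once if it is already due. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define MAXDEPTH 8

static void arm_watchdog(uint64_t t) { printf("arm watchdog for t=%llu ns\n", (unsigned long long)t); }
static void reschedule_now(void)     { printf("event already due, reschedule dequeue\n"); }

int main(void)
{
        uint64_t now = 1000 * NSEC_PER_SEC;
        uint64_t near_ev_cache[MAXDEPTH];
        uint64_t next_event = now + 5 * NSEC_PER_SEC;   /* never sleep longer than 5 s */
        int level;

        for (level = 0; level < MAXDEPTH; level++)
                near_ev_cache[level] = ~0ULL;            /* "no pending event" */
        near_ev_cache[0] = now + 3 * NSEC_PER_SEC;       /* some class unthrottles in 3 s */
        near_ev_cache[1] = now + NSEC_PER_SEC / 4;       /* the earliest pending event */

        for (level = 0; level < MAXDEPTH; level++)
                if (next_event > near_ev_cache[level])
                        next_event = near_ev_cache[level];

        if (next_event > now)
                arm_watchdog(next_event);                /* qdisc_watchdog_schedule_ns() */
        else
                reschedule_now();                        /* schedule_work(&q->work) */
        return 0;
}
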
994                                                   1147 
                                                   >> 1148 /* try to drop from each class (by prio) until one succeed */
                                                   >> 1149 static unsigned int htb_drop(struct Qdisc* sch)
                                                   >> 1150 {
                                                   >> 1151         struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 1152         int prio;
                                                   >> 1153 
                                                   >> 1154         for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                                                   >> 1155                 struct list_head *p;
                                                   >> 1156                 list_for_each (p,q->drops+prio) {
                                                   >> 1157                         struct htb_class *cl = list_entry(p, struct htb_class,
                                                   >> 1158                                                           un.leaf.drop_list);
                                                   >> 1159                         unsigned int len;
                                                   >> 1160                         if (cl->un.leaf.q->ops->drop && 
                                                   >> 1161                                 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                                   >> 1162                                 sch->q.qlen--;
                                                   >> 1163                                 if (!cl->un.leaf.q->q.qlen)
                                                   >> 1164                                         htb_deactivate (q,cl);
                                                   >> 1165                                 return len;
                                                   >> 1166                         }
                                                   >> 1167                 }
                                                   >> 1168         }
                                                   >> 1169         return 0;
                                                   >> 1170 }
                                                   >> 1171 
995 /* reset all classes */                           1172 /* reset all classes */
996 /* always called under BH & queue lock */         1173 /* always called under BH & queue lock */
997 static void htb_reset(struct Qdisc *sch)       !! 1174 static void htb_reset(struct Qdisc* sch)
998 {                                                 1175 {
999         struct htb_sched *q = qdisc_priv(sch); !! 1176         struct htb_sched *q = (struct htb_sched *)sch->data;
1000         struct htb_class *cl;                 !! 1177         int i;
1001         unsigned int i;                       !! 1178         HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
1002                                                  1179 
1003         for (i = 0; i < q->clhash.hashsize; i !! 1180         for (i = 0; i < HTB_HSIZE; i++) {
1004                 hlist_for_each_entry(cl, &q-> !! 1181                 struct list_head *p;
                                                   >> 1182                 list_for_each (p,q->hash+i) {
                                                   >> 1183                         struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1005                         if (cl->level)           1184                         if (cl->level)
1006                                 memset(&cl->i !! 1185                                 memset(&cl->un.inner,0,sizeof(cl->un.inner));
1007                         else {                   1186                         else {
1008                                 if (cl->leaf. !! 1187                                 if (cl->un.leaf.q) 
1009                                         qdisc !! 1188                                         qdisc_reset(cl->un.leaf.q);
                                                   >> 1189                                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1010                         }                        1190                         }
1011                         cl->prio_activity = 0    1191                         cl->prio_activity = 0;
1012                         cl->cmode = HTB_CAN_S    1192                         cl->cmode = HTB_CAN_SEND;
                                                   >> 1193 #ifdef HTB_DEBUG
                                                   >> 1194                         cl->pq_node.rb_color = -1;
                                                   >> 1195                         memset(cl->node,255,sizeof(cl->node));
                                                   >> 1196 #endif
                                                   >> 1197 
1013                 }                                1198                 }
1014         }                                        1199         }
1015         qdisc_watchdog_cancel(&q->watchdog);  !! 1200         sch->flags &= ~TCQ_F_THROTTLED;
1016         __qdisc_reset_queue(&q->direct_queue) !! 1201         del_timer(&q->timer);
1017         memset(q->hlevel, 0, sizeof(q->hlevel !! 1202         __skb_queue_purge(&q->direct_queue);
1018         memset(q->row_mask, 0, sizeof(q->row_ !! 1203         sch->q.qlen = 0;
                                                   >> 1204         memset(q->row,0,sizeof(q->row));
                                                   >> 1205         memset(q->row_mask,0,sizeof(q->row_mask));
                                                   >> 1206         memset(q->wait_pq,0,sizeof(q->wait_pq));
                                                   >> 1207         memset(q->ptr,0,sizeof(q->ptr));
                                                   >> 1208         for (i = 0; i < TC_HTB_NUMPRIO; i++)
                                                   >> 1209                 INIT_LIST_HEAD(q->drops+i);
1019 }                                                1210 }
1020                                                  1211 
1021 static const struct nla_policy htb_policy[TCA !! 1212 static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1022         [TCA_HTB_PARMS] = { .len = sizeof(str << 
1023         [TCA_HTB_INIT]  = { .len = sizeof(str << 
1024         [TCA_HTB_CTAB]  = { .type = NLA_BINAR << 
1025         [TCA_HTB_RTAB]  = { .type = NLA_BINAR << 
1026         [TCA_HTB_DIRECT_QLEN] = { .type = NLA << 
1027         [TCA_HTB_RATE64] = { .type = NLA_U64  << 
1028         [TCA_HTB_CEIL64] = { .type = NLA_U64  << 
1029         [TCA_HTB_OFFLOAD] = { .type = NLA_FLA << 
1030 };                                            << 
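
The policy table above mainly bounds attribute sizes: TCA_HTB_INIT must carry at least a struct tc_htb_glob, TCA_HTB_DIRECT_QLEN is a u32, TCA_HTB_RATE64/TCA_HTB_CEIL64 are u64, and TCA_HTB_OFFLOAD is a bare flag. As a rough illustration of the global option blob userspace puts into TCA_HTB_INIT (the struct mirrors tc_htb_glob; values are examples only, and a real tool such as tc builds this through netlink helpers):

/* Example of the TCA_HTB_INIT payload that htb_init() parses. */
#include <stdint.h>
#include <stdio.h>

struct htb_glob_example {            /* same layout idea as struct tc_htb_glob */
        uint32_t version;            /* checked against HTB_VER >> 16 in htb_init() */
        uint32_t rate2quantum;       /* quantum = rate / rate2quantum, forced to >= 1 */
        uint32_t defcls;             /* minor classid for unclassified traffic */
        uint32_t debug;              /* ignored by modern kernels, dumped as 0 */
        uint32_t direct_pkts;        /* filled by the kernel on dump, not on init */
};

int main(void)
{
        struct htb_glob_example gopt = {
                .version      = 3,    /* example; must match the kernel's major version */
                .rate2quantum = 10,   /* tc's default r2q */
                .defcls       = 0x10, /* "default 10" on the tc command line (hex minor) */
        };

        printf("version=%u r2q=%u defcls=%x\n",
               gopt.version, gopt.rate2quantum, gopt.defcls);
        return 0;
}
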
1031                                               << 
1032 static void htb_work_func(struct work_struct  << 
1033 {                                             << 
1034         struct htb_sched *q = container_of(wo << 
1035         struct Qdisc *sch = q->watchdog.qdisc << 
1036                                               << 
1037         rcu_read_lock();                      << 
1038         __netif_schedule(qdisc_root(sch));    << 
1039         rcu_read_unlock();                    << 
1040 }                                             << 
1041                                               << 
1042 static int htb_offload(struct net_device *dev << 
1043 {                                                1213 {
1044         return dev->netdev_ops->ndo_setup_tc( !! 1214         struct htb_sched *q = (struct htb_sched*)sch->data;
1045 }                                             !! 1215         struct rtattr *tb[TCA_HTB_INIT];
1046                                               << 
1047 static int htb_init(struct Qdisc *sch, struct << 
1048                     struct netlink_ext_ack *e << 
1049 {                                             << 
1050         struct net_device *dev = qdisc_dev(sc << 
1051         struct tc_htb_qopt_offload offload_op << 
1052         struct htb_sched *q = qdisc_priv(sch) << 
1053         struct nlattr *tb[TCA_HTB_MAX + 1];   << 
1054         struct tc_htb_glob *gopt;                1216         struct tc_htb_glob *gopt;
1055         unsigned int ntx;                     !! 1217         int i;
1056         bool offload;                         !! 1218 #ifdef HTB_DEBUG
1057         int err;                              !! 1219         printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
1058                                               !! 1220                           HTB_VER >> 16,HTB_VER & 0xffff);
1059         qdisc_watchdog_init(&q->watchdog, sch !! 1221 #endif
1060         INIT_WORK(&q->work, htb_work_func);   !! 1222         if (!opt || rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1061                                               !! 1223                         tb[TCA_HTB_INIT-1] == NULL ||
1062         if (!opt)                             !! 1224                         RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
1063                 return -EINVAL;               !! 1225                 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1064                                               << 
1065         err = tcf_block_get(&q->block, &q->fi << 
1066         if (err)                              << 
1067                 return err;                   << 
1068                                               << 
1069         err = nla_parse_nested_deprecated(tb, << 
1070                                           NUL << 
1071         if (err < 0)                          << 
1072                 return err;                   << 
1073                                               << 
1074         if (!tb[TCA_HTB_INIT])                << 
1075                 return -EINVAL;                  1226                 return -EINVAL;
1076                                               !! 1227         }
1077         gopt = nla_data(tb[TCA_HTB_INIT]);    !! 1228         gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
1078         if (gopt->version != HTB_VER >> 16)   !! 1229         if (gopt->version != HTB_VER >> 16) {
                                                   >> 1230                 printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                                   >> 1231                                 HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
1079                 return -EINVAL;                  1232                 return -EINVAL;
1080                                               << 
1081         offload = nla_get_flag(tb[TCA_HTB_OFF << 
1082                                               << 
1083         if (offload) {                        << 
1084                 if (sch->parent != TC_H_ROOT) << 
1085                         NL_SET_ERR_MSG(extack << 
1086                         return -EOPNOTSUPP;   << 
1087                 }                             << 
1088                                               << 
1089                 if (!tc_can_offload(dev) || ! << 
1090                         NL_SET_ERR_MSG(extack << 
1091                         return -EOPNOTSUPP;   << 
1092                 }                             << 
1093                                               << 
1094                 q->num_direct_qdiscs = dev->r << 
1095                 q->direct_qdiscs = kcalloc(q- << 
1096                                            si << 
1097                                            GF << 
1098                 if (!q->direct_qdiscs)        << 
1099                         return -ENOMEM;       << 
1100         }                                        1233         }
                                                   >> 1234         q->debug = gopt->debug;
                                                   >> 1235         HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);
1101                                                  1236 
1102         err = qdisc_class_hash_init(&q->clhas !! 1237         INIT_LIST_HEAD(&q->root);
1103         if (err < 0)                          !! 1238         for (i = 0; i < HTB_HSIZE; i++)
1104                 return err;                   !! 1239                 INIT_LIST_HEAD(q->hash+i);
1105                                               !! 1240         for (i = 0; i < TC_HTB_NUMPRIO; i++)
1106         if (tb[TCA_HTB_DIRECT_QLEN])          !! 1241                 INIT_LIST_HEAD(q->drops+i);
1107                 q->direct_qlen = nla_get_u32( !! 1242 
1108         else                                  !! 1243         init_timer(&q->timer);
1109                 q->direct_qlen = qdisc_dev(sc !! 1244         skb_queue_head_init(&q->direct_queue);
1110                                               !! 1245 
                                                   >> 1246         q->direct_qlen = sch->dev->tx_queue_len;
                                                   >> 1247         if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                                                   >> 1248                 q->direct_qlen = 2;
                                                   >> 1249         q->timer.function = htb_timer;
                                                   >> 1250         q->timer.data = (unsigned long)sch;
                                                   >> 1251 
                                                   >> 1252 #ifdef HTB_RATECM
                                                   >> 1253         init_timer(&q->rttim);
                                                   >> 1254         q->rttim.function = htb_rate_timer;
                                                   >> 1255         q->rttim.data = (unsigned long)sch;
                                                   >> 1256         q->rttim.expires = jiffies + HZ;
                                                   >> 1257         add_timer(&q->rttim);
                                                   >> 1258 #endif
1111         if ((q->rate2quantum = gopt->rate2qua    1259         if ((q->rate2quantum = gopt->rate2quantum) < 1)
1112                 q->rate2quantum = 1;             1260                 q->rate2quantum = 1;
1113         q->defcls = gopt->defcls;                1261         q->defcls = gopt->defcls;
1114                                                  1262 
1115         if (!offload)                         !! 1263         MOD_INC_USE_COUNT;
1116                 return 0;                     << 
1117                                               << 
1118         for (ntx = 0; ntx < q->num_direct_qdi << 
1119                 struct netdev_queue *dev_queu << 
1120                 struct Qdisc *qdisc;          << 
1121                                               << 
1122                 qdisc = qdisc_create_dflt(dev << 
1123                                           TC_ << 
1124                 if (!qdisc) {                 << 
1125                         return -ENOMEM;       << 
1126                 }                             << 
1127                                               << 
1128                 q->direct_qdiscs[ntx] = qdisc << 
1129                 qdisc->flags |= TCQ_F_ONETXQU << 
1130         }                                     << 
1131                                               << 
1132         sch->flags |= TCQ_F_MQROOT;           << 
1133                                               << 
1134         offload_opt = (struct tc_htb_qopt_off << 
1135                 .command = TC_HTB_CREATE,     << 
1136                 .parent_classid = TC_H_MAJ(sc << 
1137                 .classid = TC_H_MIN(q->defcls << 
1138                 .extack = extack,             << 
1139         };                                    << 
1140         err = htb_offload(dev, &offload_opt); << 
1141         if (err)                              << 
1142                 return err;                   << 
1143                                               << 
1144         /* Defer this assignment, so that htb << 
1145          * parts (especially calling ndo_setu << 
1146          */                                   << 
1147         q->offload = true;                    << 
1148                                               << 
1149         return 0;                                1264         return 0;
1150 }                                                1265 }
1151                                                  1266 
1152 static void htb_attach_offload(struct Qdisc * << 
1153 {                                             << 
1154         struct net_device *dev = qdisc_dev(sc << 
1155         struct htb_sched *q = qdisc_priv(sch) << 
1156         unsigned int ntx;                     << 
1157                                               << 
1158         for (ntx = 0; ntx < q->num_direct_qdi << 
1159                 struct Qdisc *old, *qdisc = q << 
1160                                               << 
1161                 old = dev_graft_qdisc(qdisc-> << 
1162                 qdisc_put(old);               << 
1163                 qdisc_hash_add(qdisc, false); << 
1164         }                                     << 
1165         for (ntx = q->num_direct_qdiscs; ntx  << 
1166                 struct netdev_queue *dev_queu << 
1167                 struct Qdisc *old = dev_graft << 
1168                                               << 
1169                 qdisc_put(old);               << 
1170         }                                     << 
1171                                               << 
1172         kfree(q->direct_qdiscs);              << 
1173         q->direct_qdiscs = NULL;              << 
1174 }                                             << 
1175                                               << 
1176 static void htb_attach_software(struct Qdisc  << 
1177 {                                             << 
1178         struct net_device *dev = qdisc_dev(sc << 
1179         unsigned int ntx;                     << 
1180                                               << 
1181         /* Resemble qdisc_graft behavior. */  << 
1182         for (ntx = 0; ntx < dev->num_tx_queue << 
1183                 struct netdev_queue *dev_queu << 
1184                 struct Qdisc *old = dev_graft << 
1185                                               << 
1186                 qdisc_refcount_inc(sch);      << 
1187                                               << 
1188                 qdisc_put(old);               << 
1189         }                                     << 
1190 }                                             << 
1191                                               << 
1192 static void htb_attach(struct Qdisc *sch)     << 
1193 {                                             << 
1194         struct htb_sched *q = qdisc_priv(sch) << 
1195                                               << 
1196         if (q->offload)                       << 
1197                 htb_attach_offload(sch);      << 
1198         else                                  << 
1199                 htb_attach_software(sch);     << 
1200 }                                             << 
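
htb_attach_software() above mirrors qdisc_graft(): the same root qdisc is installed on every TX queue, one reference is taken per queue, and the reference on whatever was grafted before is dropped. A toy model of that refcount bookkeeping (plain counters stand in for struct Qdisc and for the real dev_graft_qdisc()/qdisc_put() helpers):

/* Refcounting model of the software attach path. */
#include <stdio.h>

struct fake_qdisc { int refcnt; const char *name; };

static void put(struct fake_qdisc *q)
{
        if (q && --q->refcnt == 0)
                printf("%s destroyed\n", q->name);
}

int main(void)
{
        enum { NTXQ = 4 };
        struct fake_qdisc old_dflt = { .refcnt = NTXQ, .name = "old per-queue qdisc" };
        struct fake_qdisc htb_root = { .refcnt = 1,    .name = "htb root" };
        struct fake_qdisc *txq[NTXQ] = { &old_dflt, &old_dflt, &old_dflt, &old_dflt };

        for (int i = 0; i < NTXQ; i++) {
                struct fake_qdisc *old = txq[i];

                txq[i] = &htb_root;     /* dev_graft_qdisc(): install root, remember old */
                htb_root.refcnt++;      /* qdisc_refcount_inc(sch) */
                put(old);               /* qdisc_put(old) */
        }
        printf("htb root refcnt is now %d\n", htb_root.refcnt);
        return 0;
}
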
1201                                               << 
1202 static int htb_dump(struct Qdisc *sch, struct    1267 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1203 {                                                1268 {
1204         struct htb_sched *q = qdisc_priv(sch) !! 1269         struct htb_sched *q = (struct htb_sched*)sch->data;
1205         struct nlattr *nest;                  !! 1270         unsigned char    *b = skb->tail;
                                                   >> 1271         struct rtattr *rta;
1206         struct tc_htb_glob gopt;                 1272         struct tc_htb_glob gopt;
1207                                               !! 1273         HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
1208         if (q->offload)                       !! 1274         /* stats */
1209                 sch->flags |= TCQ_F_OFFLOADED !! 1275         HTB_QLOCK(sch);
1210         else                                  << 
1211                 sch->flags &= ~TCQ_F_OFFLOADE << 
1212                                               << 
1213         sch->qstats.overlimits = q->overlimit << 
1214         /* It's safe to not acquire qdisc lock <<
1215          * no change can happen on the qdisc  << 
1216          */                                   << 
1217                                               << 
1218         gopt.direct_pkts = q->direct_pkts;       1276         gopt.direct_pkts = q->direct_pkts;
                                                   >> 1277 
                                                   >> 1278 #ifdef HTB_DEBUG
                                                   >> 1279         if (HTB_DBG_COND(0,2))
                                                   >> 1280                 htb_debug_dump(q);
                                                   >> 1281 #endif
1219         gopt.version = HTB_VER;                  1282         gopt.version = HTB_VER;
1220         gopt.rate2quantum = q->rate2quantum;     1283         gopt.rate2quantum = q->rate2quantum;
1221         gopt.defcls = q->defcls;                 1284         gopt.defcls = q->defcls;
1222         gopt.debug = 0;                       !! 1285         gopt.debug = q->debug;
1223                                               !! 1286         rta = (struct rtattr*)b;
1224         nest = nla_nest_start_noflag(skb, TCA !! 1287         RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1225         if (nest == NULL)                     !! 1288         RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1226                 goto nla_put_failure;         !! 1289         rta->rta_len = skb->tail - b;
1227         if (nla_put(skb, TCA_HTB_INIT, sizeof !! 1290         HTB_QUNLOCK(sch);
1228             nla_put_u32(skb, TCA_HTB_DIRECT_Q !! 1291         return skb->len;
1229                 goto nla_put_failure;         !! 1292 rtattr_failure:
1230         if (q->offload && nla_put_flag(skb, T !! 1293         HTB_QUNLOCK(sch);
1231                 goto nla_put_failure;         !! 1294         skb_trim(skb, skb->tail - skb->data);
1232                                               << 
1233         return nla_nest_end(skb, nest);       << 
1234                                               << 
1235 nla_put_failure:                              << 
1236         nla_nest_cancel(skb, nest);           << 
1237         return -1;                               1295         return -1;
1238 }                                                1296 }
1239                                                  1297 
1240 static int htb_dump_class(struct Qdisc *sch,     1298 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1241                           struct sk_buff *skb !! 1299         struct sk_buff *skb, struct tcmsg *tcm)
1242 {                                                1300 {
1243         struct htb_class *cl = (struct htb_cl !! 1301 #ifdef HTB_DEBUG
1244         struct htb_sched *q = qdisc_priv(sch) !! 1302         struct htb_sched *q = (struct htb_sched*)sch->data;
1245         struct nlattr *nest;                  !! 1303 #endif
                                                   >> 1304         struct htb_class *cl = (struct htb_class*)arg;
                                                   >> 1305         unsigned char    *b = skb->tail;
                                                   >> 1306         struct rtattr *rta;
1246         struct tc_htb_opt opt;                   1307         struct tc_htb_opt opt;
1247                                                  1308 
1248         /* It's safe to not acquire qdisc lock !! 1309         HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);
1249          * no change can happen on the class  << 
1250          */                                   << 
1251         tcm->tcm_parent = cl->parent ? cl->pa << 
1252         tcm->tcm_handle = cl->common.classid; << 
1253         if (!cl->level && cl->leaf.q)         << 
1254                 tcm->tcm_info = cl->leaf.q->h << 
1255                                               << 
1256         nest = nla_nest_start_noflag(skb, TCA << 
1257         if (nest == NULL)                     << 
1258                 goto nla_put_failure;         << 
1259                                               << 
1260         memset(&opt, 0, sizeof(opt));         << 
1261                                               << 
1262         psched_ratecfg_getrate(&opt.rate, &cl << 
1263         opt.buffer = PSCHED_NS2TICKS(cl->buff << 
1264         psched_ratecfg_getrate(&opt.ceil, &cl << 
1265         opt.cbuffer = PSCHED_NS2TICKS(cl->cbu << 
1266         opt.quantum = cl->quantum;            << 
1267         opt.prio = cl->prio;                  << 
1268         opt.level = cl->level;                << 
1269         if (nla_put(skb, TCA_HTB_PARMS, sizeo << 
1270                 goto nla_put_failure;         << 
1271         if (q->offload && nla_put_flag(skb, T << 
1272                 goto nla_put_failure;         << 
1273         if ((cl->rate.rate_bytes_ps >= (1ULL  << 
1274             nla_put_u64_64bit(skb, TCA_HTB_RA << 
1275                               TCA_HTB_PAD))   << 
1276                 goto nla_put_failure;         << 
1277         if ((cl->ceil.rate_bytes_ps >= (1ULL  << 
1278             nla_put_u64_64bit(skb, TCA_HTB_CE << 
1279                               TCA_HTB_PAD))   << 
1280                 goto nla_put_failure;         << 
1281                                                  1310 
1282         return nla_nest_end(skb, nest);       !! 1311         HTB_QLOCK(sch);
                                                   >> 1312         tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
                                                   >> 1313         tcm->tcm_handle = cl->classid;
                                                   >> 1314         if (!cl->level && cl->un.leaf.q) {
                                                   >> 1315                 tcm->tcm_info = cl->un.leaf.q->handle;
                                                   >> 1316                 cl->stats.qlen = cl->un.leaf.q->q.qlen;
                                                   >> 1317         }
                                                   >> 1318 
                                                   >> 1319         rta = (struct rtattr*)b;
                                                   >> 1320         RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
                                                   >> 1321 
                                                   >> 1322         memset (&opt,0,sizeof(opt));
                                                   >> 1323 
                                                   >> 1324         opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
                                                   >> 1325         opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
                                                   >> 1326         opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
                                                   >> 1327         opt.level = cl->level; 
                                                   >> 1328         RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
                                                   >> 1329         rta->rta_len = skb->tail - b;
                                                   >> 1330 
                                                   >> 1331 #ifdef HTB_RATECM
                                                   >> 1332         cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
                                                   >> 1333         cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
                                                   >> 1334 #endif
1283                                                  1335 
1284 nla_put_failure:                              !! 1336         cl->xstats.tokens = cl->tokens;
1285         nla_nest_cancel(skb, nest);           !! 1337         cl->xstats.ctokens = cl->ctokens;
                                                   >> 1338         RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
                                                   >> 1339         RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
                                                   >> 1340         HTB_QUNLOCK(sch);
                                                   >> 1341         return skb->len;
                                                   >> 1342 rtattr_failure:
                                                   >> 1343         HTB_QUNLOCK(sch);
                                                   >> 1344         skb_trim(skb, b - skb->data);
1286         return -1;                               1345         return -1;
1287 }                                                1346 }
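
One detail worth noting in htb_dump_class() above: the legacy struct tc_htb_opt only has room for a 32-bit byte rate, so TCA_HTB_RATE64/TCA_HTB_CEIL64 are dumped in addition whenever the configured rate reaches 2^32 bytes per second. A tiny sketch of that threshold (the rate values below are just examples):

/* 32/64-bit rate split as used when dumping HTB class parameters. */
#include <stdint.h>
#include <stdio.h>

static int needs_rate64(uint64_t rate_bytes_ps)
{
        return rate_bytes_ps >= (1ULL << 32);   /* ~34 Gbit/s and above */
}

int main(void)
{
        uint64_t rates[] = {
                125000000ULL,           /* 1 Gbit/s  */
                6250000000ULL,          /* 50 Gbit/s */
        };

        for (int i = 0; i < 2; i++)
                printf("%llu B/s: %s\n", (unsigned long long)rates[i],
                       needs_rate64(rates[i]) ?
                       "needs TCA_HTB_RATE64" : "fits in tc_htb_opt");
        return 0;
}
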
1288                                                  1347 
1289 static void htb_offload_aggregate_stats(struc << 
1290                                         struc << 
1291 {                                             << 
1292         u64 bytes = 0, packets = 0;           << 
1293         struct htb_class *c;                  << 
1294         unsigned int i;                       << 
1295                                               << 
1296         gnet_stats_basic_sync_init(&cl->bstat << 
1297                                               << 
1298         for (i = 0; i < q->clhash.hashsize; i << 
1299                 hlist_for_each_entry(c, &q->c << 
1300                         struct htb_class *p = << 
1301                                               << 
1302                         while (p && p->level  << 
1303                                 p = p->parent << 
1304                                               << 
1305                         if (p != cl)          << 
1306                                 continue;     << 
1307                                               << 
1308                         bytes += u64_stats_re << 
1309                         packets += u64_stats_ << 
1310                         if (c->level == 0) {  << 
1311                                 bytes += u64_ << 
1312                                 packets += u6 << 
1313                         }                     << 
1314                 }                             << 
1315         }                                     << 
1316         _bstats_update(&cl->bstats, bytes, pa << 
1317 }                                             << 
1318                                               << 
1319 static int                                    << 
1320 htb_dump_class_stats(struct Qdisc *sch, unsig << 
1321 {                                             << 
1322         struct htb_class *cl = (struct htb_cl << 
1323         struct htb_sched *q = qdisc_priv(sch) << 
1324         struct gnet_stats_queue qs = {        << 
1325                 .drops = cl->drops,           << 
1326                 .overlimits = cl->overlimits, << 
1327         };                                    << 
1328         __u32 qlen = 0;                       << 
1329                                               << 
1330         if (!cl->level && cl->leaf.q)         << 
1331                 qdisc_qstats_qlen_backlog(cl- << 
1332                                               << 
1333         cl->xstats.tokens = clamp_t(s64, PSCH << 
1334                                     INT_MIN,  << 
1335         cl->xstats.ctokens = clamp_t(s64, PSC << 
1336                                      INT_MIN, << 
1337                                               << 
1338         if (q->offload) {                     << 
1339                 if (!cl->level) {             << 
1340                         if (cl->leaf.q)       << 
1341                                 cl->bstats =  << 
1342                         else                  << 
1343                                 gnet_stats_ba << 
1344                         _bstats_update(&cl->b << 
1345                                        u64_st << 
1346                                        u64_st << 
1347                 } else {                      << 
1348                         htb_offload_aggregate << 
1349                 }                             << 
1350         }                                     << 
1351                                               << 
1352         if (gnet_stats_copy_basic(d, NULL, &c << 
1353             gnet_stats_copy_rate_est(d, &cl-> << 
1354             gnet_stats_copy_queue(d, NULL, &q << 
1355                 return -1;                    << 
1356                                               << 
1357         return gnet_stats_copy_app(d, &cl->xs << 
1358 }                                             << 
1359                                               << 
1360 static struct netdev_queue *                  << 
1361 htb_select_queue(struct Qdisc *sch, struct tc << 
1362 {                                             << 
1363         struct net_device *dev = qdisc_dev(sc << 
1364         struct tc_htb_qopt_offload offload_op << 
1365         struct htb_sched *q = qdisc_priv(sch) << 
1366         int err;                              << 
1367                                               << 
1368         if (!q->offload)                      << 
1369                 return sch->dev_queue;        << 
1370                                               << 
1371         offload_opt = (struct tc_htb_qopt_off << 
1372                 .command = TC_HTB_LEAF_QUERY_ << 
1373                 .classid = TC_H_MIN(tcm->tcm_ << 
1374         };                                    << 
1375         err = htb_offload(dev, &offload_opt); << 
1376         if (err || offload_opt.qid >= dev->nu << 
1377                 return NULL;                  << 
1378         return netdev_get_tx_queue(dev, offlo << 
1379 }                                             << 
1380                                               << 
1381 static struct Qdisc *                         << 
1382 htb_graft_helper(struct netdev_queue *dev_que << 
1383 {                                             << 
1384         struct net_device *dev = dev_queue->d << 
1385         struct Qdisc *old_q;                  << 
1386                                               << 
1387         if (dev->flags & IFF_UP)              << 
1388                 dev_deactivate(dev);          << 
1389         old_q = dev_graft_qdisc(dev_queue, ne << 
1390         if (new_q)                            << 
1391                 new_q->flags |= TCQ_F_ONETXQU << 
1392         if (dev->flags & IFF_UP)              << 
1393                 dev_activate(dev);            << 
1394                                               << 
1395         return old_q;                         << 
1396 }                                             << 
1397                                               << 
1398 static struct netdev_queue *htb_offload_get_q << 
1399 {                                             << 
1400         struct netdev_queue *queue;           << 
1401                                               << 
1402         queue = cl->leaf.offload_queue;       << 
1403         if (!(cl->leaf.q->flags & TCQ_F_BUILT << 
1404                 WARN_ON(cl->leaf.q->dev_queue << 
1405                                               << 
1406         return queue;                         << 
1407 }                                             << 
1408                                               << 
1409 static void htb_offload_move_qdisc(struct Qdi << 
1410                                    struct htb << 
1411 {                                             << 
1412         struct netdev_queue *queue_old, *queu << 
1413         struct net_device *dev = qdisc_dev(sc << 
1414                                               << 
1415         queue_old = htb_offload_get_queue(cl_ << 
1416         queue_new = htb_offload_get_queue(cl_ << 
1417                                               << 
1418         if (!destroying) {                    << 
1419                 struct Qdisc *qdisc;          << 
1420                                               << 
1421                 if (dev->flags & IFF_UP)      << 
1422                         dev_deactivate(dev);  << 
1423                 qdisc = dev_graft_qdisc(queue << 
1424                 WARN_ON(qdisc != cl_old->leaf << 
1425         }                                     << 
1426                                               << 
1427         if (!(cl_old->leaf.q->flags & TCQ_F_B << 
1428                 cl_old->leaf.q->dev_queue = q << 
1429         cl_old->leaf.offload_queue = queue_ne << 
1430                                               << 
1431         if (!destroying) {                    << 
1432                 struct Qdisc *qdisc;          << 
1433                                               << 
1434                 qdisc = dev_graft_qdisc(queue << 
1435                 if (dev->flags & IFF_UP)      << 
1436                         dev_activate(dev);    << 
1437                 WARN_ON(!(qdisc->flags & TCQ_ << 
1438         }                                     << 
1439 }                                             << 
1440                                               << 
1441 static int htb_graft(struct Qdisc *sch, unsig    1348 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1442                      struct Qdisc **old, stru !! 1349         struct Qdisc **old)
1443 {                                                1350 {
1444         struct netdev_queue *dev_queue = sch- !! 1351         struct htb_class *cl = (struct htb_class*)arg;
1445         struct htb_class *cl = (struct htb_cl << 
1446         struct htb_sched *q = qdisc_priv(sch) << 
1447         struct Qdisc *old_q;                  << 
1448                                                  1352 
1449         if (cl->level)                        !! 1353         if (cl && !cl->level) {
1450                 return -EINVAL;               !! 1354                 if (new == NULL && (new = qdisc_create_dflt(sch->dev, 
1451                                               !! 1355                                         &pfifo_qdisc_ops)) == NULL)
1452         if (q->offload)                       !! 1356                                         return -ENOBUFS;
1453                 dev_queue = htb_offload_get_q !! 1357                 sch_tree_lock(sch);
1454                                               !! 1358                 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1455         if (!new) {                           !! 1359                         if (cl->prio_activity)
1456                 new = qdisc_create_dflt(dev_q !! 1360                                 htb_deactivate ((struct htb_sched*)sch->data,cl);
1457                                         cl->c !! 1361 
1458                 if (!new)                     !! 1362                         /* TODO: is it correct ? Why CBQ doesn't do it ? */
1459                         return -ENOBUFS;      !! 1363                         sch->q.qlen -= (*old)->q.qlen;  
1460         }                                     !! 1364                         qdisc_reset(*old);
1461                                               !! 1365                 }
1462         if (q->offload) {                     !! 1366                 sch_tree_unlock(sch);
1463                 /* One ref for cl->leaf.q, th << 
1464                 qdisc_refcount_inc(new);      << 
1465                 old_q = htb_graft_helper(dev_ << 
1466         }                                     << 
1467                                               << 
1468         *old = qdisc_replace(sch, new, &cl->l << 
1469                                               << 
1470         if (q->offload) {                     << 
1471                 WARN_ON(old_q != *old);       << 
1472                 qdisc_put(old_q);             << 
1473         }                                     << 
1474                                               << 
1475         return 0;                             << 
1476 }                                             << 
1477                                               << 
1478 static struct Qdisc *htb_leaf(struct Qdisc *s << 
1479 {                                             << 
1480         struct htb_class *cl = (struct htb_cl << 
1481         return !cl->level ? cl->leaf.q : NULL << 
1482 }                                             << 
1483                                               << 
1484 static void htb_qlen_notify(struct Qdisc *sch << 
1485 {                                             << 
1486         struct htb_class *cl = (struct htb_cl << 
1487                                               << 
1488         htb_deactivate(qdisc_priv(sch), cl);  << 
1489 }                                             << 
1490                                               << 
1491 static inline int htb_parent_last_child(struc << 
1492 {                                             << 
1493         if (!cl->parent)                      << 
1494                 /* the root class */          << 
1495                 return 0;                     << 
1496         if (cl->parent->children > 1)         << 
1497                 /* not the last child */      << 
1498                 return 0;                        1367                 return 0;
1499         return 1;                             !! 1368         }
                                                   >> 1369         return -ENOENT;
1500 }                                                1370 }
1501                                                  1371 
1502 static void htb_parent_to_leaf(struct Qdisc * !! 1372 static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
1503                                struct Qdisc * << 
1504 {                                                1373 {
1505         struct htb_sched *q = qdisc_priv(sch) !! 1374         struct htb_class *cl = (struct htb_class*)arg;
1506         struct htb_class *parent = cl->parent !! 1375         return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1507                                               << 
1508         WARN_ON(cl->level || !cl->leaf.q || c << 
1509                                               << 
1510         if (parent->cmode != HTB_CAN_SEND)    << 
1511                 htb_safe_rb_erase(&parent->pq << 
1512                                   &q->hlevel[ << 
1513                                               << 
1514         parent->level = 0;                    << 
1515         memset(&parent->inner, 0, sizeof(pare << 
1516         parent->leaf.q = new_q ? new_q : &noo << 
1517         parent->tokens = parent->buffer;      << 
1518         parent->ctokens = parent->cbuffer;    << 
1519         parent->t_c = ktime_get_ns();         << 
1520         parent->cmode = HTB_CAN_SEND;         << 
1521         if (q->offload)                       << 
1522                 parent->leaf.offload_queue =  << 
1523 }                                                1376 }
1524                                                  1377 
1525 static void htb_parent_to_leaf_offload(struct !! 1378 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1526                                        struct << 
1527                                        struct << 
1528 {                                                1379 {
1529         struct Qdisc *old_q;                  !! 1380 #ifdef HTB_DEBUG
1530                                               !! 1381         struct htb_sched *q = (struct htb_sched *)sch->data;
1531         /* One ref for cl->leaf.q, the other  !! 1382 #endif
1532         if (new_q)                            !! 1383         struct htb_class *cl = htb_find(classid,sch);
1533                 qdisc_refcount_inc(new_q);    !! 1384         HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
1534         old_q = htb_graft_helper(dev_queue, n !! 1385         if (cl) 
1535         WARN_ON(!(old_q->flags & TCQ_F_BUILTI !! 1386                 cl->refcnt++;
                                                   >> 1387         return (unsigned long)cl;
1536 }                                                1388 }
1537                                                  1389 
1538 static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl, !! 1390 static void htb_destroy_filters(struct tcf_proto **fl)
1539                                      bool last_child, bool destroying,   <<
1540                                      struct netlink_ext_ack *extack)     <<
1541 {                                                                             1391 {
1542         struct tc_htb_qopt_offload offload_opt;                         !! 1392         struct tcf_proto *tp;
1543         struct netdev_queue *dev_queue;                                  <<
1544         struct Qdisc *q = cl->leaf.q;                                    <<
1545         struct Qdisc *old;                                               <<
1546         int err;                                                         <<
1547                                                                               1393 
1548         if (cl->level)                                                   !! 1394         while ((tp = *fl) != NULL) {
1549                 return -EINVAL;                                          !! 1395                 *fl = tp->next;
1550                                                                          !! 1396                 tcf_destroy(tp);
1551         WARN_ON(!q);                                                     <<
1552         dev_queue = htb_offload_get_queue(cl);                           <<
1553         /* When destroying, caller qdisc_graft grafts the new qdisc and invokes <<
1554          * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload  <<
1555          * does not need to graft or qdisc_put the qdisc being destroyed.      <<
1556          */                                                              <<
1557         if (!destroying) {                                               <<
1558                 old = htb_graft_helper(dev_queue, NULL);                 <<
1559                 /* Last qdisc grafted should be the same as cl->leaf.q when <<
1560                  * calling htb_delete.                                   <<
1561                  */                                                      <<
1562                 WARN_ON(old != q);                                       <<
1563         }                                                                <<
1564                                                                          <<
1565         if (cl->parent) {                                                <<
1566                 _bstats_update(&cl->parent->bstats_bias,                 <<
1567                                u64_stats_read(&q->bstats.bytes),         <<
1568                                u64_stats_read(&q->bstats.packets));      <<
1569         }                                                                <<
1570                                                                          <<
1571         offload_opt = (struct tc_htb_qopt_offload) {                     <<
1572                 .command = !last_child ? TC_HTB_LEAF_DEL :               <<
1573                            destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :     <<
1574                            TC_HTB_LEAF_DEL_LAST,                         <<
1575                 .classid = cl->common.classid,                           <<
1576                 .extack = extack,                                        <<
1577         };                                                               <<
1578         err = htb_offload(qdisc_dev(sch), &offload_opt);                 <<
1579                                                                          <<
1580         if (!destroying) {                                               <<
1581                 if (!err)                                                <<
1582                         qdisc_put(old);                                  <<
1583                 else                                                     <<
1584                         htb_graft_helper(dev_queue, old);                <<
1585         }                                                                <<
1586                                                                          <<
1587         if (last_child)                                                  <<
1588                 return err;                                              <<
1589                                                                          <<
1590         if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) { <<
1591                 u32 classid = TC_H_MAJ(sch->handle) |                    <<
1592                               TC_H_MIN(offload_opt.classid);             <<
1593                 struct htb_class *moved_cl = htb_find(classid, sch);     <<
1594                                                                          <<
1595                 htb_offload_move_qdisc(sch, moved_cl, dev_queue, destroying); <<
1596         }                                                                     1397         }
1597                                                                          <<
1598         return err;                                                      <<
1599 }                                                1398 }
1600                                                  1399 
1601 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)  !! 1400 static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
1602 {                                                1401 {
                                                   >> 1402         struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 1403         HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
1603         if (!cl->level) {                        1404         if (!cl->level) {
1604                 WARN_ON(!cl->leaf.q);         !! 1405                 BUG_TRAP(cl->un.leaf.q);
1605                 qdisc_put(cl->leaf.q);        !! 1406                 sch->q.qlen -= cl->un.leaf.q->q.qlen;
1606         }                                     !! 1407                 qdisc_destroy(cl->un.leaf.q);
1607         gen_kill_estimator(&cl->rate_est);    !! 1408         }
1608         tcf_block_put(cl->block);             !! 1409         qdisc_put_rtab(cl->rate);
                                                   >> 1410         qdisc_put_rtab(cl->ceil);
                                                   >> 1411         
                                                   >> 1412 #ifdef CONFIG_NET_ESTIMATOR
                                                   >> 1413         qdisc_kill_estimator(&cl->stats);
                                                   >> 1414 #endif
                                                   >> 1415         htb_destroy_filters (&cl->filter_list);
                                                   >> 1416         
                                                   >> 1417         while (!list_empty(&cl->children)) 
                                                   >> 1418                 htb_destroy_class (sch,list_entry(cl->children.next,
                                                   >> 1419                                         struct htb_class,sibling));
                                                   >> 1420 
                                                   >> 1421         /* note: this delete may happen twice (see htb_delete) */
                                                   >> 1422         list_del(&cl->hlist);
                                                   >> 1423         list_del(&cl->sibling);
                                                   >> 1424         
                                                   >> 1425         if (cl->prio_activity)
                                                   >> 1426                 htb_deactivate (q,cl);
                                                   >> 1427         
                                                   >> 1428         if (cl->cmode != HTB_CAN_SEND)
                                                   >> 1429                 htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
                                                   >> 1430         
1609         kfree(cl);                               1431         kfree(cl);
1610 }                                                1432 }
1611                                                  1433 
1612 static void htb_destroy(struct Qdisc *sch)    !! 1434 /* always caled under BH & queue lock */
                                                   >> 1435 static void htb_destroy(struct Qdisc* sch)
1613 {                                                1436 {
1614         struct net_device *dev = qdisc_dev(sch);                        !! 1437         struct htb_sched *q = (struct htb_sched *)sch->data;
1615         struct tc_htb_qopt_offload offload_opt;                         !! 1438         HTB_DBG(0,1,"htb_destroy q=%p\n",q);
1616         struct htb_sched *q = qdisc_priv(sch);                          <<
1617         struct hlist_node *next;                                        <<
1618         bool nonempty, changed;                                         <<
1619         struct htb_class *cl;                                           <<
1620         unsigned int i;                                                 <<
1621                                                                              1439 
1622         cancel_work_sync(&q->work);                                     !! 1440         del_timer_sync (&q->timer);
1623         qdisc_watchdog_cancel(&q->watchdog);                            !! 1441 #ifdef HTB_RATECM
                                                                             >> 1442         del_timer_sync (&q->rttim);
                                                                             >> 1443 #endif
1624         /* This line used to be after htb_destroy_class call below          1444         /* This line used to be after htb_destroy_class call below
1625          * and surprisingly it worked in 2.4. But it must precede it    !! 1445            and surprisingly it worked in 2.4. But it must precede it 
1626          * because filter need its target class alive to be able to call !! 1446           because filter need its target class alive to be able to call
1627          * unbind_filter on it (without Oops).                          !! 1447            unbind_filter on it (without Oops). */
1628          */                                                             !! 1448         htb_destroy_filters(&q->filter_list);
1629         tcf_block_put(q->block);                                        !! 1449         
1630                                                                         !! 1450         while (!list_empty(&q->root)) 
1631         for (i = 0; i < q->clhash.hashsize; i++) {                      !! 1451                 htb_destroy_class (sch,list_entry(q->root.next,
1632                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { !! 1452                                         struct htb_class,sibling));
1633                         tcf_block_put(cl->block);                       !! 1453 
1634                         cl->block = NULL;                               !! 1454         __skb_queue_purge(&q->direct_queue);
1635                 }                                                       !! 1455         MOD_DEC_USE_COUNT;
1636         }                                                               !! 1456 }
1637                                                                         !! 1457 
1638         do {                                                            !! 1458 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1639                 nonempty = false;                                       !! 1459 {
1640                 changed = false;                                        !! 1460         struct htb_sched *q = (struct htb_sched *)sch->data;
1641                 for (i = 0; i < q->clhash.hashsize; i++) {              !! 1461         struct htb_class *cl = (struct htb_class*)arg;
1642                         hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], !! 1462         HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1643                                                   common.hnode) {       !! 1463 
1644                                 bool last_child;                        !! 1464         // TODO: why don't allow to delete subtree ? references ? does
1645                                                                         !! 1465         // tc subsys quarantee us that in htb_destroy it holds no class
1646                                 if (!q->offload) {                      !! 1466         // refs so that we can remove children safely there ?
1647                                         htb_destroy_class(sch, cl);     !! 1467         if (!list_empty(&cl->children) || cl->filter_cnt)
1648                                         continue;                       <<
1649                                 }                                       <<
1650                                                                         <<
1651                                 nonempty = true;                        <<
1652                                                                         <<
1653                                 if (cl->level)                          <<
1654                                         continue;                       <<
1655                                                                         <<
1656                                 changed = true;                         <<
1657                                                                         <<
1658                                 last_child = htb_parent_last_child(cl); <<
1659                                 htb_destroy_class_offload(sch, cl, last_child, <<
1660                                                           true, NULL);  <<
1661                                 qdisc_class_hash_remove(&q->clhash,     <<
1662                                                         &cl->common);   <<
1663                                 if (cl->parent)                         <<
1664                                         cl->parent->children--;         <<
1665                                 if (last_child)                         <<
1666                                         htb_parent_to_leaf(sch, cl, NULL); <<
1667                                 htb_destroy_class(sch, cl);             <<
1668                         }                                               <<
1669                 }                                                       <<
1670         } while (changed);                                              <<
1671         WARN_ON(nonempty);                                              <<
1672                                                                         <<
1673         qdisc_class_hash_destroy(&q->clhash);                           <<
1674         __qdisc_reset_queue(&q->direct_queue);                          <<
1675                                                                         <<
1676         if (q->offload) {                                               <<
1677                 offload_opt = (struct tc_htb_qopt_offload) {            <<
1678                         .command = TC_HTB_DESTROY,                      <<
1679                 };                                                      <<
1680                 htb_offload(dev, &offload_opt);                         <<
1681         }                                                               <<
1682                                                                         <<
1683         if (!q->direct_qdiscs)                                          <<
1684                 return;                                                 <<
1685         for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) <<
1686                 qdisc_put(q->direct_qdiscs[i]);                         <<
1687         kfree(q->direct_qdiscs);                                        <<
1688 }                                             << 
1689                                               << 
1690 static int htb_delete(struct Qdisc *sch, unsigned long arg,             <<
1691                       struct netlink_ext_ack *extack)                   <<
1692 {                                                                        <<
1693         struct htb_sched *q = qdisc_priv(sch);                          <<
1694         struct htb_class *cl = (struct htb_class *)arg;                 <<
1695         struct Qdisc *new_q = NULL;           << 
1696         int last_child = 0;                   << 
1697         int err;                              << 
1698                                               << 
1699         /* TODO: why don't allow to delete subtree ? references ? does  <<
1700          * tc subsys guarantee us that in htb_destroy it holds no class <<
1701          * refs so that we can remove children safely there ?           <<
1702          */                                   << 
1703         if (cl->children || qdisc_class_in_use(&cl->common)) {          <<
1704                 NL_SET_ERR_MSG(extack, "HTB class in use");             <<
1705                 return -EBUSY;                   1468                 return -EBUSY;
1706         }                                     !! 1469         
1707                                               << 
1708         if (!cl->level && htb_parent_last_child(cl))                    <<
1709                 last_child = 1;                                         <<
1710                                                                         <<
1711         if (q->offload) {                                               <<
1712                 err = htb_destroy_class_offload(sch, cl, last_child, false, <<
1713                                                 extack);                <<
1714                 if (err)                                                <<
1715                         return err;                                     <<
1716         }                                                               <<
1717                                                                         <<
1718         if (last_child) {                                               <<
1719                 struct netdev_queue *dev_queue = sch->dev_queue;        <<
1720                                                                         <<
1721                 if (q->offload)                                         <<
1722                         dev_queue = htb_offload_get_queue(cl);          <<
1723                                                                         <<
1724                 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,  <<
1725                                           cl->parent->common.classid,   <<
1726                                           NULL);                        <<
1727                 if (q->offload)                                         <<
1728                         htb_parent_to_leaf_offload(sch, dev_queue, new_q); <<
1729         }                                     << 
1730                                               << 
1731         sch_tree_lock(sch);                      1470         sch_tree_lock(sch);
1732                                               !! 1471         
1733         if (!cl->level)                       << 
1734                 qdisc_purge_queue(cl->leaf.q);                          <<
1735                                               << 
1736         /* delete from hash and active; remainder in destroy_class */       1472         /* delete from hash and active; remainder in destroy_class */
1737         qdisc_class_hash_remove(&q->clhash, &cl->common);               !! 1473         list_del_init(&cl->hlist);
1738         if (cl->parent)                       << 
1739                 cl->parent->children--;       << 
1740                                               << 
1741         if (cl->prio_activity)                   1474         if (cl->prio_activity)
1742                 htb_deactivate(q, cl);        !! 1475                 htb_deactivate (q,cl);
1743                                                  1476 
1744         if (cl->cmode != HTB_CAN_SEND)        !! 1477         if (--cl->refcnt == 0)
1745                 htb_safe_rb_erase(&cl->pq_node,                         !! 1478                 htb_destroy_class(sch,cl);
1746                                   &q->hlevel[cl->level].wait_pq);       <<
1747                                               << 
1748         if (last_child)                       << 
1749                 htb_parent_to_leaf(sch, cl, new_q);                     <<
1750                                                  1479 
1751         sch_tree_unlock(sch);                    1480         sch_tree_unlock(sch);
1752                                               << 
1753         htb_destroy_class(sch, cl);           << 
1754         return 0;                                1481         return 0;
1755 }                                                1482 }
1756                                                  1483 
1757 static int htb_change_class(struct Qdisc *sch, u32 classid,             !! 1484 static void htb_put(struct Qdisc *sch, unsigned long arg)
1758                             u32 parentid, struct nlattr **tca,          !! 1485 {
1759                             unsigned long *arg, struct netlink_ext_ack *extack) !! 1486 #ifdef HTB_DEBUG
                                                   >> 1487         struct htb_sched *q = (struct htb_sched *)sch->data;
                                                   >> 1488 #endif
                                                   >> 1489         struct htb_class *cl = (struct htb_class*)arg;
                                                   >> 1490         HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
                                                   >> 1491 
                                                   >> 1492         if (--cl->refcnt == 0)
                                                   >> 1493                 htb_destroy_class(sch,cl);
                                                   >> 1494 }
                                                   >> 1495 
                                                   >> 1496 static int htb_change_class(struct Qdisc *sch, u32 classid, 
                                                   >> 1497                 u32 parentid, struct rtattr **tca, unsigned long *arg)
1760 {                                                1498 {
1761         int err = -EINVAL;                       1499         int err = -EINVAL;
1762         struct htb_sched *q = qdisc_priv(sch);                          !! 1500         struct htb_sched *q = (struct htb_sched *)sch->data;
1763         struct htb_class *cl = (struct htb_class *)*arg, *parent;       !! 1501         struct htb_class *cl = (struct htb_class*)*arg,*parent;
1764         struct tc_htb_qopt_offload offload_opt;                         !! 1502         struct rtattr *opt = tca[TCA_OPTIONS-1];
1765         struct nlattr *opt = tca[TCA_OPTIONS];                          !! 1503         struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1766         struct nlattr *tb[TCA_HTB_MAX + 1];   !! 1504         struct rtattr *tb[TCA_HTB_RTAB];
1767         struct Qdisc *parent_qdisc = NULL;    << 
1768         struct netdev_queue *dev_queue;       << 
1769         struct tc_htb_opt *hopt;                 1505         struct tc_htb_opt *hopt;
1770         u64 rate64, ceil64;                   << 
1771         int warn = 0;                         << 
1772                                                  1506 
1773         /* extract all subattrs from opt attr */                             1507         /* extract all subattrs from opt attr */
1774         if (!opt)                             !! 1508         if (!opt || rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1775                 goto failure;                 !! 1509                         tb[TCA_HTB_PARMS-1] == NULL ||
1776                                               !! 1510                         RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1777         err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy, <<
1778                                           extack);                      <<
1779         if (err < 0)                          << 
1780                 goto failure;                    1511                 goto failure;
                                                   >> 1512         
                                                   >> 1513         parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1781                                                  1514 
1782         err = -EINVAL;                        !! 1515         hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1783         if (tb[TCA_HTB_PARMS] == NULL)        !! 1516         HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1784                 goto failure;                 !! 1517         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1785                                               !! 1518         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1786         parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); !! 1519         if (!rtab || !ctab) goto failure;
1787                                               << 
1788         hopt = nla_data(tb[TCA_HTB_PARMS]);   << 
1789         if (!hopt->rate.rate || !hopt->ceil.rate)                       <<
1790                 goto failure;                 << 
1791                                               << 
1792         if (q->offload) {                     << 
1793                 /* Options not supported by the offload. */             <<
1794                 if (hopt->rate.overhead || hopt->ceil.overhead) {       <<
1795                         NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter"); <<
1796                         goto failure;                                   <<
1797                 }                                                       <<
1798                 if (hopt->rate.mpu || hopt->ceil.mpu) {                 <<
1799                         NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter"); <<
1800                         goto failure;         << 
1801                 }                             << 
1802         }                                     << 
1803                                               << 
1804         /* Keeping backward compatible with rate_table based iproute2 tc */ <<
1805         if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)               <<
1806                 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB], <<
1807                                               NULL));                   <<
1808                                                                         <<
1809         if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)               <<
1810                 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB], <<
1811                                               NULL));                   <<
1812                                                                         <<
1813         rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; <<
1814         ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; <<
1815                                               << 
1816         if (!cl) {              /* new class */                         <<
1817                 struct net_device *dev = qdisc_dev(sch);                <<
1818                 struct Qdisc *new_q, *old_q;                            <<
1819                 int prio;                                               <<
1820                 struct {                                                <<
1821                         struct nlattr           nla;                    <<
1822                         struct gnet_estimator   opt;                    <<
1823                 } est = {                                               <<
1824                         .nla = {                                        <<
1825                                 .nla_len        = nla_attr_size(sizeof(est.opt)), <<
1826                                 .nla_type       = TCA_RATE,             <<
1827                         },                                              <<
1828                         .opt = {                                        <<
1829                                 /* 4s interval, 16s averaging constant */ <<
1830                                 .interval       = 2,                    <<
1831                                 .ewma_log       = 2,                    <<
1832                         },                                              <<
1833                 };                                                      <<
1834                                                  1520 
                                                   >> 1521         if (!cl) { /* new class */
                                                   >> 1522                 struct Qdisc *new_q;
1835                 /* check for valid classid */    1523                 /* check for valid classid */
1836                 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||      !! 1524                 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
1837                     htb_find(classid, sch))   << 
1838                         goto failure;            1525                         goto failure;
1839                                                  1526 
1840                 /* check maximal depth */        1527                 /* check maximal depth */
1841                 if (parent && parent->parent && parent->parent->level < 2) { 1528         if (parent && parent->parent && parent->parent->level < 2) {
1842                         NL_SET_ERR_MSG_MOD(extack, "tree is too deep"); !! 1529                         printk(KERN_ERR "htb: tree is too deep\n");
1843                         goto failure;            1530                         goto failure;
1844                 }                                1531                 }
1845                 err = -ENOBUFS;                  1532                 err = -ENOBUFS;
1846                 cl = kzalloc(sizeof(*cl), GFP_KERNEL);                  !! 1533                 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1847                 if (!cl)                      << 
1848                         goto failure;         << 
1849                                               << 
1850                 gnet_stats_basic_sync_init(&cl->bstats);                <<
1851                 gnet_stats_basic_sync_init(&cl->bstats_bias);           <<
1852                                                                         <<
1853                 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); <<
1854                 if (err) {                    << 
1855                         kfree(cl);            << 
1856                         goto failure;            1534                         goto failure;
1857                 }                             !! 1535                 
1858                 if (htb_rate_est || tca[TCA_RATE]) {                    !! 1536                 memset(cl, 0, sizeof(*cl));
1859                         err = gen_new_estimator(&cl->bstats, NULL,      !! 1537                 cl->refcnt = 1;
1860                                                 &cl->rate_est,           !! 1538                 INIT_LIST_HEAD(&cl->sibling);
1861                                                 NULL,                    !! 1539                 INIT_LIST_HEAD(&cl->hlist);
1862                                                 true,                    !! 1540                 INIT_LIST_HEAD(&cl->children);
1863                                                 tca[TCA_RATE] ? : &est.nla); !! 1541                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1864                         if (err)                                        !! 1542 #ifdef HTB_DEBUG
1865                                 goto err_block_put;                     !! 1543                 cl->magic = HTB_CMAGIC;
1866                 }                             !! 1544 #endif
1867                                               << 
1868                 cl->children = 0;             << 
1869                 RB_CLEAR_NODE(&cl->pq_node);  << 
1870                                               << 
1871                 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)           <<
1872                         RB_CLEAR_NODE(&cl->node[prio]);                 <<
1873                                               << 
1874                 cl->common.classid = classid; << 
1875                                               << 
1876                 /* Make sure nothing interrupts us in between of two    <<
1877                  * ndo_setup_tc calls.        << 
1878                  */                           << 
1879                 ASSERT_RTNL();                << 
1880                                                  1545 
1881                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) 1546                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1882                  * so that can't be used inside of sch_tree_lock        !! 1547                    so that can't be used inside of sch_tree_lock
1883                  * -- thanks to Karlis Peisenieks                       !! 1548                    -- thanks to Karlis Peisenieks */
1884                  */                           !! 1549                 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1885                 if (!q->offload) {                                      <<
1886                         dev_queue = sch->dev_queue;                     <<
1887                 } else if (!(parent && !parent->level)) {               <<
1888                         /* Assign a dev_queue to this classid. */       <<
1889                         offload_opt = (struct tc_htb_qopt_offload) {    <<
1890                                 .command = TC_HTB_LEAF_ALLOC_QUEUE,      <<
1891                                 .classid = cl->common.classid,           <<
1892                                 .parent_classid = parent ?               <<
1893                                         TC_H_MIN(parent->common.classid) : <<
1894                                         TC_HTB_CLASSID_ROOT,             <<
1895                                 .rate = max_t(u64, hopt->rate.rate, rate64), <<
1896                                 .ceil = max_t(u64, hopt->ceil.rate, ceil64), <<
1897                                 .prio = hopt->prio,                      <<
1898                                 .quantum = hopt->quantum,                <<
1899                                 .extack = extack,                        <<
1900                         };                                              <<
1901                         err = htb_offload(dev, &offload_opt);           <<
1902                         if (err) {                                      <<
1903                                 NL_SET_ERR_MSG_WEAK(extack,             <<
1904                                                     "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE"); <<
1905                                 goto err_kill_estimator;                <<
1906                         }                                               <<
1907                         dev_queue = netdev_get_tx_queue(dev, offload_opt.qid); <<
1908                 } else { /* First child. */                             <<
1909                         dev_queue = htb_offload_get_queue(parent);      <<
1910                         old_q = htb_graft_helper(dev_queue, NULL);      <<
1911                         WARN_ON(old_q != parent->leaf.q);               <<
1912                         offload_opt = (struct tc_htb_qopt_offload) {    <<
1913                                 .command = TC_HTB_LEAF_TO_INNER,         <<
1914                                 .classid = cl->common.classid,           <<
1915                                 .parent_classid =                        <<
1916                                         TC_H_MIN(parent->common.classid), <<
1917                                 .rate = max_t(u64, hopt->rate.rate, rate64), <<
1918                                 .ceil = max_t(u64, hopt->ceil.rate, ceil64), <<
1919                                 .prio = hopt->prio,                      <<
1920                                 .quantum = hopt->quantum,                <<
1921                                 .extack = extack,                        <<
1922                         };                                              <<
1923                         err = htb_offload(dev, &offload_opt);           <<
1924                         if (err) {                                      <<
1925                                 NL_SET_ERR_MSG_WEAK(extack,             <<
1926                                                     "Failed to offload TC_HTB_LEAF_TO_INNER"); <<
1927                                 htb_graft_helper(dev_queue, old_q);     <<
1928                                 goto err_kill_estimator;                <<
1929                         }                                               <<
1930                         _bstats_update(&parent->bstats_bias,            <<
1931                                        u64_stats_read(&old_q->bstats.bytes), <<
1932                                        u64_stats_read(&old_q->bstats.packets)); <<
1933                         qdisc_put(old_q);                               <<
1934                 }                                                       <<
1935                 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,  <<
1936                                           classid, NULL);               <<
1937                 if (q->offload) {                                       <<
1938                         /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ <<
1939                         if (new_q)                                      <<
1940                                 qdisc_refcount_inc(new_q);              <<
1941                         old_q = htb_graft_helper(dev_queue, new_q);     <<
1942                         /* No qdisc_put needed. */                      <<
1943                         WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));       <<
1944                 }                                                       <<
1945                 sch_tree_lock(sch);              1550                 sch_tree_lock(sch);
1946                 if (parent && !parent->level) {                              1551                 if (parent && !parent->level) {
1947                         /* turn parent into inner node */                    1552                         /* turn parent into inner node */
1948                         qdisc_purge_queue(parent->leaf.q);              !! 1553                         sch->q.qlen -= parent->un.leaf.q->q.qlen;
1949                         parent_qdisc = parent->leaf.q;                  !! 1554                         qdisc_destroy (parent->un.leaf.q);
1950                         if (parent->prio_activity)                      !! 1555                         if (parent->prio_activity) 
1951                                 htb_deactivate(q, parent);              !! 1556                                 htb_deactivate (q,parent);
1952                                                                              1557 
1953                         /* remove from evt list because of level change */   1558                         /* remove from evt list because of level change */
1954                         if (parent->cmode != HTB_CAN_SEND) {                 1559                         if (parent->cmode != HTB_CAN_SEND) {
1955                                 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); !! 1560                                 htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
1956                                 parent->cmode = HTB_CAN_SEND;                1561                                 parent->cmode = HTB_CAN_SEND;
1957                         }                                                    1562                         }
1958                         parent->level = (parent->parent ? parent->parent->level 1563                         parent->level = (parent->parent ? parent->parent->level
1959                                          : TC_HTB_MAXDEPTH) - 1;        !! 1564                                         : TC_HTB_MAXDEPTH) - 1;
1960                         memset(&parent->inner, 0, sizeof(parent->inner)); !! 1565                         memset (&parent->un.inner,0,sizeof(parent->un.inner));
1961                 }                                                            1566                 }
1962                                                                         <<
1963                 /* leaf (we) needs elementary qdisc */                       1567                 /* leaf (we) needs elementary qdisc */
1964                 cl->leaf.q = new_q ? new_q : &noop_qdisc;               !! 1568                 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1965                 if (q->offload)                                         <<
1966                         cl->leaf.offload_queue = dev_queue;             <<
1967                                                                         <<
1968                 cl->parent = parent;                                    !! 1570                 cl->classid = classid; cl->parent = parent;
1969                                                                              1571 
1970                 /* set class to be in HTB_CAN_SEND state */                  1572                 /* set class to be in HTB_CAN_SEND state */
1971                 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);             !! 1573                 cl->tokens = hopt->buffer;
1972                 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);           !! 1574                 cl->ctokens = hopt->cbuffer;
1973                 cl->mbuffer = 60ULL * NSEC_PER_SEC;     /* 1min */      !! 1575                 cl->mbuffer = 60000000; /* 1min */
1974                 cl->t_c = ktime_get_ns();                               !! 1576                 PSCHED_GET_TIME(cl->t_c);
1975                 cl->cmode = HTB_CAN_SEND;                                    1577                 cl->cmode = HTB_CAN_SEND;
1976                                                                              1578 
1977                 /* attach to the hash list and parent's family */            1579                 /* attach to the hash list and parent's family */
1978                 qdisc_class_hash_insert(&q->clhash, &cl->common);       !! 1580                 list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
1979                 if (parent)                                             !! 1581                 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
1980                         parent->children++;                             !! 1582 #ifdef HTB_DEBUG
1981                 if (cl->leaf.q != &noop_qdisc)                          !! 1583                 { 
1982                         qdisc_hash_add(cl->leaf.q, true);               !! 1584                         int i;
1983         } else {                              !! 1585                         for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
1984                 if (tca[TCA_RATE]) {          !! 1586                         cl->pq_node.rb_color = -1;
1985                         err = gen_replace_estimator(&cl->bstats, NULL,  !! 1587                 }
1986                                                     &cl->rate_est,      <<
1987                                                     NULL,               <<
1988                                                     true,               <<
1989                                                     tca[TCA_RATE]);     <<
1990                         if (err)              << 
1991                                 return err;   << 
1992                 }                             << 
1993                                               << 
1994                 if (q->offload) {             << 
1995                         struct net_device *dev = qdisc_dev(sch);       <<
1996                                                                         <<
1997                         offload_opt = (struct tc_htb_qopt_offload) {    <<
1998                                 .command = TC_HTB_NODE_MODIFY,           <<
1999                                 .classid = cl->common.classid,           <<
2000                                 .rate = max_t(u64, hopt->rate.rate, rate64), <<
2001                                 .ceil = max_t(u64, hopt->ceil.rate, ceil64), <<
2002                                 .prio = hopt->prio,                      <<
2003                                 .quantum = hopt->quantum,                <<
2004                                 .extack = extack,                        <<
2005                         };                                              <<
2006                         err = htb_offload(dev, &offload_opt);           <<
2007                         if (err)                                        <<
2008                                 /* Estimator was replaced, and rollback may fail <<
2009                                  * as well, so we don't try to recover it, and   <<
2010                                  * the estimator won't work properly with the    <<
2011                                  * offload anyway, because bstats are updated    <<
2012                                  * only when the stats are queried.              <<
2013                                  */           << 
2014                                 return err;   << 
2015                 }                                1587                 }
2016                                               !! 1588 #endif
2017                 sch_tree_lock(sch);           !! 1589         } else sch_tree_lock(sch);
2018         }                                     << 
2019                                               << 
2020         psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);      <<
2021         psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);      <<
2022                                                  1590 
2023         /* it used to be a nasty bug here, we have to check that node       1591         /* it used to be a nasty bug here, we have to check that node
2024          * is really leaf before changing cl->leaf !                    !! 1592            is really leaf before changing cl->un.leaf ! */
2025          */                                   << 
2026         if (!cl->level) {                        1593         if (!cl->level) {
2027                 u64 quantum = cl->rate.rate_bytes_ps;                   !! 1594                 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
2028                                                                         !! 1595                 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
2029                 do_div(quantum, q->rate2quantum);                       !! 1596                         printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
2030                 cl->quantum = min_t(u64, quantum, INT_MAX);             !! 1597                         cl->un.leaf.quantum = 1000;
2031                                                                         !! 1598                 }
2032                 if (!hopt->quantum && cl->quantum < 1000) {             !! 1599                 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
2033                         warn = -1;            !! 1600                         printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
2034                         cl->quantum = 1000;   !! 1601                         cl->un.leaf.quantum = 200000;
2035                 }                             << 
2036                 if (!hopt->quantum && cl->quantum > 200000) {           <<
2037                         warn = 1;             << 
2038                         cl->quantum = 200000; << 
2039                 }                                1602                 }
2040                 if (hopt->quantum)               1603                 if (hopt->quantum)
2041                         cl->quantum = hopt->quantum;                    !! 1604                         cl->un.leaf.quantum = hopt->quantum;
2042                 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)          !! 1605                 if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
2043                         cl->prio = TC_HTB_NUMPRIO - 1;                  !! 1606                         cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
2044         }                                        1607         }
2045                                                  1608 
2046         cl->buffer = PSCHED_TICKS2NS(hopt->buffer);                     !! 1609         cl->buffer = hopt->buffer;
2047         cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);                   !! 1610         cl->cbuffer = hopt->cbuffer;
2048                                               !! 1611         if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
                                                   >> 1612         if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
2049         sch_tree_unlock(sch);                    1613         sch_tree_unlock(sch);
2050         qdisc_put(parent_qdisc);              << 
2051                                               << 
2052         if (warn)                             << 
2053                 NL_SET_ERR_MSG_FMT_MOD(extack,                          <<
2054                                        "quantum of class %X is %s. Consider r2q change.", <<
2055                                        cl->common.classid, (warn == -1 ? "small" : "big")); <<
2056                                                                         <<
2057         qdisc_class_hash_grow(sch, &q->clhash);                         <<
2058                                                  1614 
2059         *arg = (unsigned long)cl;                1615         *arg = (unsigned long)cl;
2060         return 0;                                1616         return 0;
2061                                                  1617 
2062 err_kill_estimator:                           << 
2063         gen_kill_estimator(&cl->rate_est);    << 
2064 err_block_put:                                << 
2065         tcf_block_put(cl->block);             << 
2066         kfree(cl);                            << 
2067 failure:                                         1618 failure:
                                                   >> 1619         if (rtab) qdisc_put_rtab(rtab);
                                                   >> 1620         if (ctab) qdisc_put_rtab(ctab);
2068         return err;                              1621         return err;
2069 }                                                1622 }
2070                                                  1623 
2071 static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg, !! 1624 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
2072                                        struct netlink_ext_ack *extack)  <<
2073 {                                                                             1625 {
2074         struct htb_sched *q = qdisc_priv(sch);                          !! 1626         struct htb_sched *q = (struct htb_sched *)sch->data;
2075         struct htb_class *cl = (struct htb_class *)arg;                      1627         struct htb_class *cl = (struct htb_class *)arg;
2076                                               !! 1628         struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
2077         return cl ? cl->block : q->block;     !! 1629         HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
                                                   >> 1630         return fl;
2078 }                                                1631 }
2079                                                  1632 
2080 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, 1633 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
2081                                      u32 classid)                       !! 1634         u32 classid)
2082 {                                                1635 {
2083         struct htb_class *cl = htb_find(classid, sch);                  !! 1636         struct htb_sched *q = (struct htb_sched *)sch->data;
2084                                               !! 1637         struct htb_class *cl = htb_find (classid,sch);
                                                   >> 1638         HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
2085         /*if (cl && !cl->level) return 0;        1639         /*if (cl && !cl->level) return 0;
2086          * The line above used to be there to prevent attaching filters to !! 1640           The line above used to be there to prevent attaching filters to 
2087          * leaves. But at least tc_index filter uses this just to get class !! 1641           leaves. But at least tc_index filter uses this just to get class 
2088          * for other reasons so that we have to allow for it.           !! 1642           for other reasons so that we have to allow for it.
2089          * ----                                                         !! 1643           ----
2090          * 19.6.2002 As Werner explained it is ok - bind filter is just !! 1644           19.6.2002 As Werner explained it is ok - bind filter is just
2091          * another way to "lock" the class - unlike "get" this lock can !! 1645           another way to "lock" the class - unlike "get" this lock can
2092          * be broken by class during destroy IIUC.                      !! 1646           be broken by class during destroy IIUC.
2093          */                                      1647          */
2094         if (cl)                               !! 1648         if (cl) 
2095                 qdisc_class_get(&cl->common); !! 1649                 cl->filter_cnt++; 
                                                   >> 1650         else 
                                                   >> 1651                 q->filter_cnt++;
2096         return (unsigned long)cl;                1652         return (unsigned long)cl;
2097 }                                                1653 }
2098                                                  1654 
2099 static void htb_unbind_filter(struct Qdisc *s    1655 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2100 {                                                1656 {
                                                   >> 1657         struct htb_sched *q = (struct htb_sched *)sch->data;
2101         struct htb_class *cl = (struct htb_cl    1658         struct htb_class *cl = (struct htb_class *)arg;
2102                                               !! 1659         HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
2103         qdisc_class_put(&cl->common);         !! 1660         if (cl) 
                                                   >> 1661                 cl->filter_cnt--; 
                                                   >> 1662         else 
                                                   >> 1663                 q->filter_cnt--;
2104 }                                                1664 }
2105                                                  1665 
2106 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)           1666 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2107 {                                                1667 {
2108         struct htb_sched *q = qdisc_priv(sch);                          !! 1668         struct htb_sched *q = (struct htb_sched *)sch->data;
2109         struct htb_class *cl;                 !! 1669         int i;
2110         unsigned int i;                       << 
2111                                                  1670 
2112         if (arg->stop)                           1671         if (arg->stop)
2113                 return;                          1672                 return;
2114                                                  1673 
2115         for (i = 0; i < q->clhash.hashsize; i++) {                      !! 1674         for (i = 0; i < HTB_HSIZE; i++) {
2116                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { !! 1675                 struct list_head *p;
2117                         if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg)) !! 1676                 list_for_each (p,q->hash+i) {
                                                   >> 1677                         struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                                                   >> 1678                         if (arg->count < arg->skip) {
                                                   >> 1679                                 arg->count++;
                                                   >> 1680                                 continue;
                                                   >> 1681                         }
                                                   >> 1682                         if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                                   >> 1683                                 arg->stop = 1;
2118                                 return;          1684                                 return;
                                                   >> 1685                         }
                                                   >> 1686                         arg->count++;
2119                 }                                1687                 }
2120         }                                        1688         }
2121 }                                                1689 }
2122                                                  1690 
2123 static const struct Qdisc_class_ops htb_class_ops = {                   !! 1691 static struct Qdisc_class_ops htb_class_ops =
2124         .select_queue   =       htb_select_queue,                       !! 1692 {
2125         .graft          =       htb_graft,                              !! 1693     htb_graft,
2126         .leaf           =       htb_leaf,                               !! 1694     htb_leaf,
2127         .qlen_notify    =       htb_qlen_notify,                        !! 1695     htb_get,
2128         .find           =       htb_search,                             !! 1696     htb_put,
2129         .change         =       htb_change_class,                       !! 1697     htb_change_class,
2130         .delete         =       htb_delete,                             !! 1698     htb_delete,
2131         .walk           =       htb_walk,                               !! 1699     htb_walk,
2132         .tcf_block      =       htb_tcf_block,                          !! 1700 
2133         .bind_tcf       =       htb_bind_filter,                        !! 1701     htb_find_tcf,
2134         .unbind_tcf     =       htb_unbind_filter,                      !! 1702     htb_bind_filter,
2135         .dump           =       htb_dump_class,                         !! 1703     htb_unbind_filter,
2136         .dump_stats     =       htb_dump_class_stats,                   !! 1704 
                                                   >> 1705     htb_dump_class,
2137 };                                               1706 };
2138                                                  1707 
2139 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {                 !! 1708 struct Qdisc_ops htb_qdisc_ops =
2140         .cl_ops         =       &htb_class_ops,                         !! 1709 {
2141         .id             =       "htb",                                  !! 1710     NULL,
2142         .priv_size      =       sizeof(struct htb_sched),               !! 1711     &htb_class_ops,
2143         .enqueue        =       htb_enqueue,                            !! 1712     "htb",
2144         .dequeue        =       htb_dequeue,                            !! 1713     sizeof(struct htb_sched),
2145         .peek           =       qdisc_peek_dequeued,                    !! 1714 
2146         .init           =       htb_init,     !! 1715     htb_enqueue,
2147         .attach         =       htb_attach,   !! 1716     htb_dequeue,
2148         .reset          =       htb_reset,    !! 1717     htb_requeue,
2149         .destroy        =       htb_destroy,  !! 1718     htb_drop,
2150         .dump           =       htb_dump,     !! 1719 
2151         .owner          =       THIS_MODULE,  !! 1720     htb_init,
                                                   >> 1721     htb_reset,
                                                   >> 1722     htb_destroy,
                                                   >> 1723     NULL /* htb_change */,
                                                   >> 1724 
                                                   >> 1725     htb_dump,
2152 };                                               1726 };
2153 MODULE_ALIAS_NET_SCH("htb");                  << 
2154                                                  1727 
2155 static int __init htb_module_init(void)       !! 1728 #ifdef MODULE
                                                   >> 1729 int init_module(void)
2156 {                                                1730 {
2157         return register_qdisc(&htb_qdisc_ops);                          !! 1731     return register_qdisc(&htb_qdisc_ops);
2158 }                                                1732 }
2159 static void __exit htb_module_exit(void)      !! 1733 
                                                   >> 1734 void cleanup_module(void) 
2160 {                                                1735 {
2161         unregister_qdisc(&htb_qdisc_ops);     !! 1736     unregister_qdisc(&htb_qdisc_ops);
2162 }                                                1737 }
2163                                               << 
2164 module_init(htb_module_init)                  << 
2165 module_exit(htb_module_exit)                  << 
2166 MODULE_LICENSE("GPL");                           1738 MODULE_LICENSE("GPL");
2167 MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");              !! 1739 #endif
2168                                                  1740 
