TOMOYO Linux Cross Reference
Linux/include/net/sch_generic.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
#include <linux/xarray.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32             data[256];
        struct qdisc_rate_table *next;
        int             refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_MISSED,
        __QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
        /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
         * Use qdisc_run_begin/end() or qdisc_is_running() instead.
         */
        __QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED      BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING    BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY   (QDISC_STATE_MISSED | \
                                        QDISC_STATE_DRAINING)

struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
        u16                     data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
        struct sk_buff  *head;
        struct sk_buff  *tail;
        __u32           qlen;
        spinlock_t      lock;
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *sch);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue: it can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeueing the next packet.
                                      * It's true for MQ/MQPRIO slaves, or for
                                      * a non-multiqueue device.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT          0x40 /* root of its hierarchy:
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
#define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK            0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED         0x200 /* qdisc is offloaded to HW */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct hlist_node       hash;
        u32                     handle;
        u32                     parent;

        struct netdev_queue     *dev_queue;

        struct net_rate_estimator __rcu *rate_est;
        struct gnet_stats_basic_sync __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        int                     pad;
        refcount_t              refcnt;

        /*
         * For performance's sake on SMP, we put frequently modified fields at the end
         */
        struct sk_buff_head     gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head   q;
        struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
        int                     owner;
        unsigned long           state;
        unsigned long           state2; /* must be written under qdisc spinlock */
        struct Qdisc            *next_sched;
        struct sk_buff_head     skb_bad_txq;

        spinlock_t              busylock ____cacheline_aligned_in_smp;
        spinlock_t              seqlock;

        struct rcu_head         rcu;
        netdevice_tracker       dev_tracker;
        struct lock_class_key   root_lock_key;
        /* private data */
        long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return;
        refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return true;
        return refcount_dec_if_one(&qdisc->refcnt);
}

/* Intended to be used by unlocked users when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return qdisc;
        if (refcount_inc_not_zero(&qdisc->refcnt))
                return qdisc;
        return NULL;
}

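/* Usage sketch (illustrative only; "txq" and "q" are assumed locals, not
 * part of this header): an unlocked reader can pin a qdisc against a
 * concurrent release under RCU, then drop its reference with
 * qdisc_put_unlocked():
 */
#if 0
        rcu_read_lock();
        q = qdisc_refcount_inc_nz(rcu_dereference(txq->qdisc));
        rcu_read_unlock();
        if (q) {
                /* q cannot be freed while we hold the reference */
                qdisc_put_unlocked(q);
        }
#endif
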
/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK)
                return spin_is_locked(&qdisc->seqlock);
        return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
        return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
        if (qdisc_is_percpu_stats(qdisc))
                return nolock_qdisc_is_empty(qdisc);
        return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (spin_trylock(&qdisc->seqlock))
                        return true;

                /* No need to insist if the MISSED flag was already set.
                 * Note that test_and_set_bit() also gives us memory ordering
                 * guarantees wrt potential earlier enqueue() and below
                 * spin_trylock(), both of which are necessary to prevent races.
                 */
                if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
                        return false;

                /* Try to take the lock again to make sure that we will either
                 * grab it or the CPU that still has it will see MISSED set
                 * when testing it in qdisc_run_end().
                 */
                return spin_trylock(&qdisc->seqlock);
        }
        return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);

                /* spin_unlock() only has store-release semantics. The unlock
                 * and test_bit() ordering is a store-load ordering, so a full
                 * memory barrier is needed here.
                 */
                smp_mb();

                if (unlikely(test_bit(__QDISC_STATE_MISSED,
                                      &qdisc->state)))
                        __netif_schedule(qdisc);
        } else {
                __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
        }
}

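/* Usage sketch (illustrative; loosely modeled on the TX softirq path in
 * net/core/dev.c, with some_xmit() standing in for sch_direct_xmit()):
 * qdisc_run_begin()/qdisc_run_end() bracket a dequeue burst, and for
 * !TCQ_F_NOLOCK qdiscs the root lock must already be held:
 */
#if 0
        if (qdisc_run_begin(q)) {
                struct sk_buff *skb;

                while ((skb = q->dequeue(q)) != NULL)
                        some_xmit(skb);
                qdisc_run_end(q);
        }
#endif
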
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
        return netdev_queue_dql_avail(txq);
}

struct Qdisc_class_ops {
        unsigned int            flags;
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                        struct Qdisc *, struct Qdisc **,
                                        struct netlink_ext_ack *extack);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*find)(struct Qdisc *, u32 classid);
        int                     (*change)(struct Qdisc *, u32, u32,
                                        struct nlattr **, unsigned long *,
                                        struct netlink_ext_ack *);
        int                     (*delete)(struct Qdisc *, unsigned long,
                                          struct netlink_ext_ack *);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_block *      (*tcf_block)(struct Qdisc *sch,
                                             unsigned long arg,
                                             struct netlink_ext_ack *extack);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                        struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
        QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;
        unsigned int            static_flags;

        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);

        int                     (*init)(struct Qdisc *sch, struct nlattr *arg,
                                        struct netlink_ext_ack *extack);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *sch,
                                          struct nlattr *arg,
                                          struct netlink_ext_ack *extack);
        void                    (*attach)(struct Qdisc *sch);
        int                     (*change_tx_queue_len)(struct Qdisc *, unsigned int);
        void                    (*change_real_num_tx)(struct Qdisc *sch,
                                                      unsigned int new_real_tx);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        void                    (*ingress_block_set)(struct Qdisc *sch,
                                                     u32 block_index);
        void                    (*egress_block_set)(struct Qdisc *sch,
                                                    u32 block_index);
        u32                     (*ingress_block_get)(struct Qdisc *sch);
        u32                     (*egress_block_get)(struct Qdisc *sch);

        struct module           *owner;
};

struct tcf_result {
        union {
                struct {
                        unsigned long   class;
                        u32             classid;
                };
                const struct tcf_proto *goto_tp;
        };
};

struct tcf_chain;

struct tcf_proto_ops {
        struct list_head        head;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto *);
        void                    (*destroy)(struct tcf_proto *tp, bool rtnl_held,
                                           struct netlink_ext_ack *extack);

        void *                  (*get)(struct tcf_proto *, u32 handle);
        void                    (*put)(struct tcf_proto *tp, void *f);
        int                     (*change)(struct net *net, struct sk_buff *,
                                        struct tcf_proto *, unsigned long,
                                        u32 handle, struct nlattr **,
                                        void **, u32,
                                        struct netlink_ext_ack *);
        int                     (*delete)(struct tcf_proto *tp, void *arg,
                                          bool *last, bool rtnl_held,
                                          struct netlink_ext_ack *);
        bool                    (*delete_empty)(struct tcf_proto *tp);
        void                    (*walk)(struct tcf_proto *tp,
                                        struct tcf_walker *arg, bool rtnl_held);
        int                     (*reoffload)(struct tcf_proto *tp, bool add,
                                             flow_setup_cb_t *cb, void *cb_priv,
                                             struct netlink_ext_ack *extack);
        void                    (*hw_add)(struct tcf_proto *tp,
                                          void *type_data);
        void                    (*hw_del)(struct tcf_proto *tp,
                                          void *type_data);
        void                    (*bind_class)(void *, u32, unsigned long,
                                              void *, unsigned long);
        void *                  (*tmplt_create)(struct net *net,
                                                struct tcf_chain *chain,
                                                struct nlattr **tca,
                                                struct netlink_ext_ack *extack);
        void                    (*tmplt_destroy)(void *tmplt_priv);
        void                    (*tmplt_reoffload)(struct tcf_chain *chain,
                                                   bool add,
                                                   flow_setup_cb_t *cb,
                                                   void *cb_priv);
        struct tcf_exts *       (*get_exts)(const struct tcf_proto *tp,
                                            u32 handle);

        /* rtnetlink specific */
        int                     (*dump)(struct net *, struct tcf_proto *, void *,
                                        struct sk_buff *skb, struct tcmsg *,
                                        bool);
        int                     (*terse_dump)(struct net *net,
                                              struct tcf_proto *tp, void *fh,
                                              struct sk_buff *skb,
                                              struct tcmsg *t, bool rtnl_held);
        int                     (*tmplt_dump)(struct sk_buff *skb,
                                              struct net *net,
                                              void *tmplt_priv);

        struct module           *owner;
        int                     flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu  *next;
        void __rcu              *root;

        /* called under RCU BH lock */
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        void                    *data;
        const struct tcf_proto_ops      *ops;
        struct tcf_chain        *chain;
        /* Lock protects tcf_proto shared state and can be used by unlocked
         * classifiers to protect their private data.
         */
        spinlock_t              lock;
        bool                    deleting;
        bool                    counted;
        refcount_t              refcnt;
        struct rcu_head         rcu;
        struct hlist_node       destroy_ht_node;
};

struct qdisc_skb_cb {
        struct {
                unsigned int            pkt_len;
                u16                     slave_dev_queue_mapping;
                u16                     tc_classid;
        };
#define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
        /* Protects filter_chain. */
        struct mutex filter_chain_lock;
        struct tcf_proto __rcu *filter_chain;
        struct list_head list;
        struct tcf_block *block;
        u32 index; /* chain index */
        unsigned int refcnt;
        unsigned int action_refcnt;
        bool explicitly_created;
        bool flushing;
        const struct tcf_proto_ops *tmplt_ops;
        void *tmplt_priv;
        struct rcu_head rcu;
};

struct tcf_block {
        struct xarray ports; /* datapath accessible */
        /* Lock protects tcf_block and lifetime-management data of chains
         * attached to the block (refcnt, action_refcnt, explicitly_created).
         */
        struct mutex lock;
        struct list_head chain_list;
        u32 index; /* block index for shared blocks */
        u32 classid; /* which class this block belongs to */
        refcount_t refcnt;
        struct net *net;
        struct Qdisc *q;
        struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
        struct flow_block flow_block;
        struct list_head owner_list;
        bool keep_dst;
        bool bypass_wanted;
        atomic_t filtercnt; /* Number of filters */
        atomic_t skipswcnt; /* Number of skip_sw filters */
        atomic_t offloadcnt; /* Number of offloaded filters */
        unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
        struct {
                struct tcf_chain *chain;
                struct list_head filter_chain_list;
        } chain0;
        struct rcu_head rcu;
        DECLARE_HASHTABLE(proto_destroy_ht, 7);
        struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
        return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
        return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)                                 \
        rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)                                    \
        rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

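/* Usage sketch (illustrative; do_something() is a placeholder): walking a
 * filter chain with the chain lock held. tcf_chain_dereference() encodes
 * the locking requirement for lockdep instead of a bare rcu_dereference():
 */
#if 0
        mutex_lock(&chain->filter_chain_lock);
        for (tp = tcf_chain_dereference(chain->filter_chain, chain);
             tp; tp = tcf_chain_dereference(tp->next, chain))
                do_something(tp);
        mutex_unlock(&chain->filter_chain_lock);
#endif
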
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

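/* Usage sketch (illustrative; struct my_skb_cb is made up): qdiscs that
 * keep private per-packet state in the skb control block store it in
 * qdisc_skb_cb->data and size-check it at build time, in the style of
 * sch_netem:
 */
#if 0
struct my_skb_cb {
        u64 enqueue_time;
};

static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
        return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
}
#endif
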
static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
        __u32 qlen = q->qstats.qlen;
        int i;

        if (qdisc_is_percpu_stats(q)) {
                for_each_possible_cpu(i)
                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
        } else {
                qlen += q->q.qlen;
        }

        return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
        return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
        if (q->flags & TCQ_F_MQROOT)
                spin_lock_bh(qdisc_lock(q));
        else
                spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
        if (q->flags & TCQ_F_MQROOT)
                spin_unlock_bh(qdisc_lock(q));
        else
                spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

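/* Usage sketch (illustrative; q and new_limit are made-up privates): a
 * qdisc's ->change() callback typically applies new parameters under
 * sch_tree_lock() so concurrent dequeues see a consistent configuration:
 */
#if 0
        sch_tree_lock(sch);
        q->limit = new_limit;
        sch_tree_unlock(sch);
#endif
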
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
        return ntx < dev->real_num_tx_queues ?
                        default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
        u32                     classid;
        unsigned int            filter_cnt;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        if (!id)
                return NULL;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}

static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
{
        return cl->filter_cnt > 0;
}

static inline void qdisc_class_get(struct Qdisc_class_common *cl)
{
        unsigned int res;

        if (check_add_overflow(cl->filter_cnt, 1, &res))
                WARN(1, "Qdisc class overflow");

        cl->filter_cnt = res;
}

static inline void qdisc_class_put(struct Qdisc_class_common *cl)
{
        unsigned int res;

        if (check_sub_overflow(cl->filter_cnt, 1, &res))
                WARN(1, "Qdisc class underflow");

        cl->filter_cnt = res;
}

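/* Usage sketch (illustrative; my_find() is a placeholder lookup): classful
 * qdiscs account filters bound to a class with qdisc_class_get()/put() and
 * use qdisc_class_in_use() to refuse deleting a class that still has
 * filters attached:
 */
#if 0
static unsigned long my_bind_filter(struct Qdisc *sch, unsigned long parent,
                                    u32 classid)
{
        struct Qdisc_class_common *cl = my_find(sch, classid);

        if (cl)
                qdisc_class_get(cl);
        return (unsigned long)cl;
}

static int my_delete(struct Qdisc *sch, unsigned long arg,
                     struct netlink_ext_ack *extack)
{
        struct Qdisc_class_common *cl = (struct Qdisc_class_common *)arg;

        if (qdisc_class_in_use(cl)) {
                NL_SET_ERR_MSG(extack, "Class is in use");
                return -EBUSY;
        }
        /* ...remove the class... */
        return 0;
}
#endif
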
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
        u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

        return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
                                  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
                              void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                                struct Qdisc *new, struct Qdisc *old,
                                enum tc_setup_type type, void *type_data,
                                struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
                          void *type_data)
{
        q->flags &= ~TCQ_F_OFFLOADED;
        return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                           struct Qdisc *new, struct Qdisc *old,
                           enum tc_setup_type type, void *type_data,
                           struct netlink_ext_ack *extack)
{
}
#endif
void qdisc_offload_query_caps(struct net_device *dev,
                              enum tc_setup_type type,
                              void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid,
                                struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_XGRESS
        return skb->tc_at_ingress;
#else
        return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_skip_classify) {
                skb->tc_skip_classify = 0;
                return true;
        }
#endif
        return false;
}

/* Reset the TX qdiscs of all device queues from index i upward. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (!qdisc_is_empty(q)) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) !=
                    rcu_access_pointer(txq->qdisc_sleeping))
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        return sch->enqueue(skb, sch, to_free);
}

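/* Usage sketch (illustrative; "child" is an assumed child qdisc): a
 * classful parent enqueues into a child and, following the pattern of
 * sch_prio, only counts a drop when net_xmit_drop_count() reports one
 * (a __NET_XMIT_STOLEN verdict is not a real drop):
 */
#if 0
        ret = qdisc_enqueue(skb, child, to_free);
        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        qdisc_qstats_drop(sch);
                return ret;
        }
        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
#endif
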
static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
                                  __u64 bytes, __u32 packets)
{
        u64_stats_update_begin(&bstats->syncp);
        u64_stats_add(&bstats->bytes, bytes);
        u64_stats_add(&bstats->packets, packets);
        u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
                                 const struct sk_buff *skb)
{
        _bstats_update(bstats,
                       qdisc_pkt_len(skb),
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
        this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
        qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
        qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
        __u32 qlen = qdisc_qlen_sum(sch);

        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
                                             __u32 *backlog)
{
        struct gnet_stats_queue qstats = { 0 };

        gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
        *qlen = qstats.qlen + qdisc_qlen(sch);
        *backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
        qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
        qdisc_reset(sch);
        qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        __qdisc_enqueue_tail(skb, &sch->q);
        qdisc_qstats_backlog_inc(sch, skb);
        return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        skb->next = qh->head;

        if (!qh->head)
                qh->tail = skb;
        qh->head = skb;
        qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

struct tc_skb_cb {
        struct qdisc_skb_cb qdisc_cb;
        u32 drop_reason;

        u16 zone; /* Only valid if post_ct = true */
        u16 mru;
        u8 post_ct:1;
        u8 post_ct_snat:1;
        u8 post_ct_dnat:1;
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
        struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

        BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
        return cb;
}

static inline enum skb_drop_reason
tcf_get_drop_reason(const struct sk_buff *skb)
{
        return tc_skb_cb(skb)->drop_reason;
}

static inline void tcf_set_drop_reason(const struct sk_buff *skb,
                                       enum skb_drop_reason reason)
{
        tc_skb_cb(skb)->drop_reason = reason;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
                                    struct sk_buff **to_free)
{
        if (skb->prev)
                skb->prev->next = *to_free;
        else
                skb->next = *to_free;
        *to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct qdisc_skb_head *qh,
                                                   struct sk_buff **to_free)
{
        struct sk_buff *skb = __qdisc_dequeue_head(qh);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                __qdisc_drop(skb, to_free);
                return len;
        }

        return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        const struct qdisc_skb_head *qh = &sch->q;

        return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!skb) {
                skb = sch->dequeue(sch);

                if (skb) {
                        __skb_queue_head(&sch->gso_skb, skb);
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, skb);
                        sch->q.qlen++;
                }
        }

        return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
                                                 struct sk_buff *skb)
{
        if (qdisc_is_percpu_stats(sch)) {
                qdisc_qstats_cpu_backlog_dec(sch, skb);
                qdisc_bstats_cpu_update(sch, skb);
                qdisc_qstats_cpu_qlen_dec(sch);
        } else {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
                sch->q.qlen--;
        }
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
                                                 unsigned int pkt_len)
{
        if (qdisc_is_percpu_stats(sch)) {
                qdisc_qstats_cpu_qlen_inc(sch);
                this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
        } else {
                sch->qstats.backlog += pkt_len;
                sch->q.qlen++;
        }
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (skb) {
                skb = __skb_dequeue(&sch->gso_skb);
                if (qdisc_is_percpu_stats(sch)) {
                        qdisc_qstats_cpu_backlog_dec(sch, skb);
                        qdisc_qstats_cpu_qlen_dec(sch);
                } else {
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                }
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}

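/* Usage sketch (illustrative; "child" is an inner qdisc and
 * tokens_available() is made up): a shaping qdisc in the style of sch_tbf
 * peeks at the head packet and only commits the dequeue once the packet
 * may be sent; qdisc_dequeue_peeked() keeps the gso_skb bookkeeping
 * consistent with the earlier peek:
 */
#if 0
        skb = child->ops->peek(child);  /* often qdisc_peek_dequeued */
        if (skb && tokens_available(sch, skb))
                return qdisc_dequeue_peeked(child);
        return NULL;
#endif
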
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
        /*
         * We do not know the backlog in bytes of this list; it
         * is up to the caller to correct it.
         */
        ASSERT_RTNL();
        if (qh->qlen) {
                rtnl_kfree_skbs(qh->head, qh->tail);

                qh->head = NULL;
                qh->tail = NULL;
                qh->qlen = 0;
        }
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                          struct Qdisc **pold)
{
        struct Qdisc *old;

        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
        if (old != NULL)
                qdisc_purge_queue(old);
        sch_tree_unlock(sch);

        return old;
}

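/* Usage sketch (illustrative; q->child is a made-up private pointer, inside
 * a ->graft() callback): a classful qdisc's graft often reduces to swapping
 * the child pointer; qdisc_replace() does the swap under the tree lock,
 * purges the old child's queue and propagates the qlen change upward:
 */
#if 0
        if (!new)
                new = &noop_qdisc;

        *old = qdisc_replace(sch, new, &q->child);
        return 0;
#endif
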
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        rtnl_kfree_skbs(skb, skb);
        qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_cpu_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop_all(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
        u16     mpu;
        u8      linklayer;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (len < r->mpu)
                len = r->mpu;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}

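/* Worked example (illustrative): psched_ratecfg_precompute() chooses @mult
 * and @shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps. At 1 Gbit/s (rate_bytes_ps =
 * 125,000,000) a 1500 byte packet costs about 1500 * 8 = 12,000 ns of link
 * time. On ATM link layers each 53 byte cell carries only 48 bytes of
 * payload, so the same packet is first expanded to
 * DIV_ROUND_UP(1500, 48) * 53 = 32 * 53 = 1696 bytes of wire time.
 */
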
void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* The legacy struct tc_ratespec has a 32bit @rate field;
         * Qdiscs using a 64bit rate should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->mpu = r->mpu;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
        u64     rate_pkts_ps; /* packets per second */
        u32     mult;
        u8      shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
                                  unsigned int pkt_num)
{
        return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
        struct tcf_proto *filter_list;
        struct tcf_block *block;
        struct gnet_stats_basic_sync __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
                                                const struct sk_buff *skb)
{
        bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
        this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
        struct mini_Qdisc miniq1;
        struct mini_Qdisc miniq2;
        struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
                          struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                          struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
                                struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
        while (test_bit(__QDISC_STATE_SCHED, &q->state))
                msleep(1);
}

#endif

