/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
	u64 alloc_fail;

	u64 hw_drops;
	u64 hw_drop_overruns;

	u64 csum_unnecessary;
	u64 csum_none;
	u64 csum_bad;

	u64 hw_gro_packets;
	u64 hw_gro_bytes;
	u64 hw_gro_wire_packets;
	u64 hw_gro_wire_bytes;

	u64 hw_drop_ratelimits;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;

	u64 hw_drops;
	u64 hw_drop_errors;

	u64 csum_none;
	u64 needs_csum;

	u64 hw_gso_packets;
	u64 hw_gso_bytes;
	u64 hw_gso_wire_packets;
	u64 hw_gso_wire_bytes;

	u64 hw_drop_ratelimits;

	u64 stop;
	u64 wake;
};

/**
 * struct netdev_stat_ops - netdev ops for fine grained stats
 * @get_queue_stats_rx:	get stats for a given Rx queue
 * @get_queue_stats_tx:	get stats for a given Tx queue
 * @get_base_stats:	get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently existing
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and the overall device history.
 * @get_base_stats can also be used to report any miscellaneous packets
 * transferred outside of the main set of queues used by the networking stack.
 * When the statistics for the entire device are queried, first @get_base_stats
 * is issued to collect the delta, and then a per-queue callback is called
 * for each live queue. Only statistics which are set in @get_base_stats
 * are reported at the device level, meaning that unlike in queue callbacks,
 * setting a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when the history
 * for some of the events is not maintained, a reliable "total" cannot
 * be provided).
 *
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the rtnl_lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};
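
/* Illustrative sketch only, not part of this header: one way a driver could
 * wire up the callbacks above.  The my_* names, the priv layout and which
 * counters the ring keeps are hypothetical assumptions, not kernel API.
 */
struct my_rx_ring_stats {
	u64 packets;
	u64 bytes;
	u64 alloc_fail;
};

struct my_stats_priv {
	struct my_rx_ring_stats *rx_stats;	/* one entry per Rx queue */
};

static void my_get_queue_stats_rx(struct net_device *dev, int idx,
				  struct netdev_queue_stats_rx *stats)
{
	struct my_stats_priv *priv = netdev_priv(dev);
	struct my_rx_ring_stats *ring = &priv->rx_stats[idx];

	/* Assign only the statistics this driver collects; everything else
	 * is deliberately left undefined, as required by the rules above.
	 */
	stats->packets = ring->packets;
	stats->bytes = ring->bytes;
	stats->alloc_fail = ring->alloc_fail;
}

static void my_get_base_stats(struct net_device *dev,
			      struct netdev_queue_stats_rx *rx,
			      struct netdev_queue_stats_tx *tx)
{
	/* Setting a counter to zero here is meaningful: it declares that the
	 * device-level total is reliable even though this (hypothetical)
	 * driver tracks no events outside of the live queues.
	 */
	rx->packets = 0;
	rx->bytes = 0;
	rx->alloc_fail = 0;
}

/* The driver would typically point dev->stat_ops at this table before
 * registering the netdev.
 */
static const struct netdev_stat_ops my_stat_ops = {
	.get_queue_stats_rx	= my_get_queue_stats_rx,
	.get_base_stats		= my_get_base_stats,
};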

/**
 * struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
 * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
 *
 * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
 *			 The new memory is written at the specified address.
 *
 * @ndo_queue_mem_free:	Free memory from an RX queue.
 *
 * @ndo_queue_start:	Start an RX queue with the specified memory and at the
 *			specified index.
 *
 * @ndo_queue_stop:	Stop the RX queue at the specified index. The stopped
 *			queue's memory is written at the specified address.
 */
struct netdev_queue_mgmt_ops {
	size_t			ndo_queue_mem_size;
	int			(*ndo_queue_mem_alloc)(struct net_device *dev,
						       void *per_queue_mem,
						       int idx);
	void			(*ndo_queue_mem_free)(struct net_device *dev,
						      void *per_queue_mem);
	int			(*ndo_queue_start)(struct net_device *dev,
						   void *per_queue_mem,
						   int idx);
	int			(*ndo_queue_stop)(struct net_device *dev,
						  void *per_queue_mem,
						  int idx);
};

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while wake up should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler, so
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context requirements.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in a case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @stop_thrs:	minimal number of available descriptors for queue to be left
 *		enabled
 * @start_thrs:	minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times, beware of side effects.
 * @get_desc must be a formula or a function call, it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit, see the comment on top of the file.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})

/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:	struct netdev_queue to stop/start
 * @pkts:	number of packets completed
 * @bytes:	number of bytes completed
 * @get_desc:	get current number of free descriptors (see requirements below!)
 * @start_thrs:	minimal number of descriptors to re-enable the queue
 * @down_cond:	down condition, predicate indicating that the queue should
 *		not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times, beware of side effects.
 * @get_desc must be a formula or a function call, it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggy back on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)

/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)	\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(txq, get_desc, start_thrs);		\
	})

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(txq, get_desc,			\
				     stop_thrs, start_thrs);		\
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})
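
/* Illustrative sketch only, not part of this header: the intended pairing of
 * the stop/wake helpers across the xmit and completion paths.  The my_* types,
 * the descriptor accounting and the thresholds are hypothetical assumptions.
 * Keeping MY_START_THRS above MY_STOP_THRS adds hysteresis so the queue does
 * not flip between stopped and running on every completed frame.
 */
struct my_tx_ring {
	struct net_device *netdev;
	unsigned int idx;		/* Tx queue index */
	unsigned int free_descs;	/* free descriptors, kept up to date */
};

struct my_tx_priv {
	struct my_tx_ring *tx_rings;	/* one entry per Tx queue */
};

#define MY_STOP_THRS	(MAX_SKB_FRAGS + 1)	/* worst-case descs per skb */
#define MY_START_THRS	(2 * (MAX_SKB_FRAGS + 1))

/* Xmit path: check for ring full first (false wake ups are possible), then
 * post the frame and stop the queue if the next worst-case frame may not fit.
 */
static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct my_tx_priv *priv = netdev_priv(dev);
	struct my_tx_ring *ring = &priv->tx_rings[skb_get_queue_mapping(skb)];

	if (unlikely(ring->free_descs < MY_STOP_THRS))
		return NETDEV_TX_BUSY;

	/* ... post descriptors and update ring->free_descs here ... */

	netif_subqueue_maybe_stop(dev, ring->idx, ring->free_descs,
				  MY_STOP_THRS, MY_START_THRS);
	return NETDEV_TX_OK;
}

/* Completion path (NAPI poll): reclaim descriptors, then report to BQL and
 * wake the queue if enough space has been freed up.
 */
static void my_clean_tx(struct my_tx_ring *ring,
			unsigned int pkts, unsigned int bytes)
{
	/* ... reclaim descriptors and update ring->free_descs here ... */

	netif_subqueue_completed_wake(ring->netdev, ring->idx, pkts, bytes,
				      ring->free_descs, MY_START_THRS);
}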

#endif