TOMOYO Linux Cross Reference
Linux/net/core/netdev-genl.c
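This file implements the generic netlink "netdev" family handlers: per-device capability reporting (XDP features, XDP RX metadata, AF_XDP TX metadata), NAPI and queue introspection, per-queue statistics (qstats), and device add/del/change notifications.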


// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/busy_poll.h>

#include "netdev-genl-gen.h"
#include "dev.h"

struct netdev_nl_dump_ctx {
        unsigned long   ifindex;
        unsigned int    rxq_idx;
        unsigned int    txq_idx;
        unsigned int    napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
        NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

        return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
                   const struct genl_info *info)
{
        u64 xsk_features = 0;
        u64 xdp_rx_meta = 0;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
        if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
                xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

        if (netdev->xsk_tx_metadata_ops) {
                if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
                if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
        }

        if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
                              netdev->xdp_features, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
                              xdp_rx_meta, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
                              xsk_features, NETDEV_A_DEV_PAD))
                goto err_cancel_msg;

        if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
                if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
                                netdev->xdp_zc_max_segs))
                        goto err_cancel_msg;
        }

        genlmsg_end(rsp, hdr);

        return 0;

err_cancel_msg:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
        struct genl_info info;
        struct sk_buff *ntf;

        if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
                                NETDEV_NLGRP_MGMT))
                return;

        genl_info_init_ntf(&info, &netdev_nl_family, cmd);

        ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!ntf)
                return;

        if (netdev_nl_dev_fill(netdev, ntf, &info)) {
                nlmsg_free(ntf);
                return;
        }

        genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
                                0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *netdev;
        struct sk_buff *rsp;
        u32 ifindex;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_dev_fill(netdev, rsp, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        int err = 0;

        rtnl_lock();
        for_each_netdev_dump(net, netdev, ctx->ifindex) {
                err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
                if (err < 0)
                        break;
        }
        rtnl_unlock();

        return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
                        const struct genl_info *info)
{
        void *hdr;
        pid_t pid;

        if (WARN_ON_ONCE(!napi->dev))
                return -EINVAL;
        if (!(napi->dev->flags & IFF_UP))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (napi->napi_id >= MIN_NAPI_ID &&
            nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
                goto nla_put_failure;

        if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
                goto nla_put_failure;

        if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
                goto nla_put_failure;

        if (napi->thread) {
                pid = task_pid_nr(napi->thread);
                if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
                        goto nla_put_failure;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        struct sk_buff *rsp;
        u32 napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        napi = napi_by_id(napi_id);
        if (napi) {
                err = netdev_nl_napi_fill_one(rsp, napi, info);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                        const struct genl_info *info,
                        struct netdev_nl_dump_ctx *ctx)
{
        struct napi_struct *napi;
        int err = 0;

        if (!(netdev->flags & IFF_UP))
                return err;

        list_for_each_entry(napi, &netdev->napi_list, dev_list) {
                if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
                        continue;

                err = netdev_nl_napi_fill_one(rsp, napi, info);
                if (err)
                        return err;
                ctx->napi_id = napi->napi_id;
        }
        return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_NAPI_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->napi_id = 0;
                }
        }
        rtnl_unlock();

        return err;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                         u32 q_idx, u32 q_type, const struct genl_info *info)
{
        struct netdev_rx_queue *rxq;
        struct netdev_queue *txq;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                rxq = __netif_get_rx_queue(netdev, q_idx);
                if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             rxq->napi->napi_id))
                        goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                txq = netdev_get_tx_queue(netdev, q_idx);
                if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             txq->napi->napi_id))
                        goto nla_put_failure;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
                                    u32 q_type)
{
        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                if (q_id >= netdev->real_num_rx_queues)
                        return -EINVAL;
                return 0;
        case NETDEV_QUEUE_TYPE_TX:
                if (q_id >= netdev->real_num_tx_queues)
                        return -EINVAL;
        }
        return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
                     u32 q_type, const struct genl_info *info)
{
        int err = 0;

        if (!(netdev->flags & IFF_UP))
                return err;

        err = netdev_nl_queue_validate(netdev, q_idx, q_type);
        if (err)
                return err;

        return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        u32 q_id, q_type, ifindex;
        struct net_device *netdev;
        struct sk_buff *rsp;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
                return -EINVAL;

        q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
        q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
        ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        int err = 0;
        int i;

        if (!(netdev->flags & IFF_UP))
                return err;

        for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
                err = netdev_nl_queue_fill_one(rsp, netdev, i,
                                               NETDEV_QUEUE_TYPE_RX, info);
                if (err)
                        return err;
                ctx->rxq_idx = i++;
        }
        for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
                err = netdev_nl_queue_fill_one(rsp, netdev, i,
                                               NETDEV_QUEUE_TYPE_TX, info);
                if (err)
                        return err;
                ctx->txq_idx = i++;
        }

        return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->rxq_idx = 0;
                        ctx->txq_idx = 0;
                }
        }
        rtnl_unlock();

        return err;
}

#define NETDEV_STAT_NOT_SET             (~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
        const u64 *add = _add;
        u64 *sum = _sum;

        while (size) {
                if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
                        *sum += *add;
                sum++;
                add++;
                size -= 8;
        }
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
        if (value == NETDEV_STAT_NOT_SET)
                return 0;
        return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
                return -EMSGSIZE;
        return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
                return -EMSGSIZE;
        return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
                      u32 q_type, int i, const struct genl_info *info)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        struct netdev_queue_stats_rx rx;
        struct netdev_queue_stats_tx tx;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                memset(&rx, 0xff, sizeof(rx));
                ops->get_queue_stats_rx(netdev, i, &rx);
                if (!memchr_inv(&rx, 0xff, sizeof(rx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_rx(rsp, &rx))
                        goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                memset(&tx, 0xff, sizeof(tx));
                ops->get_queue_stats_tx(netdev, i, &tx);
                if (!memchr_inv(&tx, 0xff, sizeof(tx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_tx(rsp, &tx))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);
        return 0;

nla_cancel:
        genlmsg_cancel(rsp, hdr);
        return 0;
nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        int i, err;

        if (!(netdev->flags & IFF_UP))
                return 0;

        i = ctx->rxq_idx;
        while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
                                            i, info);
                if (err)
                        return err;
                ctx->rxq_idx = i++;
        }
        i = ctx->txq_idx;
        while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
                                            i, info);
                if (err)
                        return err;
                ctx->txq_idx = i++;
        }

        ctx->rxq_idx = 0;
        ctx->txq_idx = 0;
        return 0;
}

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
                          const struct genl_info *info)
{
        struct netdev_queue_stats_rx rx_sum, rx;
        struct netdev_queue_stats_tx tx_sum, tx;
        const struct netdev_stat_ops *ops;
        void *hdr;
        int i;

        ops = netdev->stat_ops;
        /* Netdev can't guarantee any complete counters */
        if (!ops->get_base_stats)
                return 0;

        memset(&rx_sum, 0xff, sizeof(rx_sum));
        memset(&tx_sum, 0xff, sizeof(tx_sum));

        ops->get_base_stats(netdev, &rx_sum, &tx_sum);

        /* The op was there, but nothing reported, don't bother */
        if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
            !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        for (i = 0; i < netdev->real_num_rx_queues; i++) {
                memset(&rx, 0xff, sizeof(rx));
                if (ops->get_queue_stats_rx)
                        ops->get_queue_stats_rx(netdev, i, &rx);
                netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
        }
        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                memset(&tx, 0xff, sizeof(tx));
                if (ops->get_queue_stats_tx)
                        ops->get_queue_stats_tx(netdev, i, &tx);
                netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
        }

        if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
            netdev_nl_stats_write_tx(rsp, &tx_sum))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
                              struct sk_buff *skb, const struct genl_info *info,
                              struct netdev_nl_dump_ctx *ctx)
{
        if (!netdev->stat_ops)
                return 0;

        switch (scope) {
        case 0:
                return netdev_nl_stats_by_netdev(netdev, skb, info);
        case NETDEV_QSTATS_SCOPE_QUEUE:
                return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
        }

        return -EINVAL; /* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        unsigned int ifindex;
        unsigned int scope;
        int err = 0;

        scope = 0;
        if (info->attrs[NETDEV_A_QSTATS_SCOPE])
                scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

        ifindex = 0;
        if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev && netdev->stat_ops) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                } else {
                        NL_SET_BAD_ATTR(info->extack,
                                        info->attrs[NETDEV_A_QSTATS_IFINDEX]);
                        err = netdev ? -EOPNOTSUPP : -ENODEV;
                }
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                        if (err < 0)
                                break;
                }
        }
        rtnl_unlock();

        return err;
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
                break;
        case NETDEV_UNREGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
                break;
        case NETDEV_XDP_FEAT_CHANGE:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
        .notifier_call  = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
        int err;

        err = register_netdevice_notifier(&netdev_genl_nb);
        if (err)
                return err;

        err = genl_register_family(&netdev_nl_family);
        if (err)
                goto err_unreg_ntf;

        return 0;

err_unreg_ntf:
        unregister_netdevice_notifier(&netdev_genl_nb);
        return err;
}

subsys_initcall(netdev_genl_init);

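The listing above covers only the kernel side of the "netdev" family. As a rough illustration of the userspace contract, the sketch below issues NETDEV_CMD_DEV_GET for one interface (the request served by netdev_nl_dev_get_doit() above) and prints two of the attributes added by netdev_nl_dev_fill(). It is not part of the kernel file: it assumes libnl-genl-3 is installed and that the UAPI header <linux/netdev.h> (which defines NETDEV_FAMILY_NAME, NETDEV_FAMILY_VERSION and the NETDEV_A_DEV_* attributes) matches the kernel version shown, and error handling is kept deliberately minimal.

/*
 * Hedged userspace sketch: query NETDEV_CMD_DEV_GET via libnl-genl-3.
 * Build (assumed): cc dev_get.c $(pkg-config --cflags --libs libnl-genl-3.0)
 */
#include <stdio.h>
#include <net/if.h>
#include <linux/netdev.h>       /* UAPI: NETDEV_CMD_*, NETDEV_A_DEV_* */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int resp_cb(struct nl_msg *msg, void *arg)
{
        struct nlattr *tb[NETDEV_A_DEV_MAX + 1];

        /* Parse the attributes appended by netdev_nl_dev_fill(). */
        if (genlmsg_parse(nlmsg_hdr(msg), 0, tb, NETDEV_A_DEV_MAX, NULL) < 0)
                return NL_SKIP;

        if (tb[NETDEV_A_DEV_IFINDEX])
                printf("ifindex:      %u\n",
                       nla_get_u32(tb[NETDEV_A_DEV_IFINDEX]));
        if (tb[NETDEV_A_DEV_XDP_FEATURES])
                printf("xdp-features: 0x%llx\n",
                       (unsigned long long)nla_get_u64(tb[NETDEV_A_DEV_XDP_FEATURES]));
        return NL_OK;
}

int main(int argc, char **argv)
{
        struct nl_sock *sk;
        struct nl_msg *msg;
        int family, err;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
                return 1;
        }

        sk = nl_socket_alloc();
        if (!sk || genl_connect(sk) < 0)
                return 1;

        /* Resolve the family registered by genl_register_family() above. */
        family = genl_ctrl_resolve(sk, NETDEV_FAMILY_NAME);
        if (family < 0)
                return 1;

        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, resp_cb, NULL);

        msg = nlmsg_alloc();
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NETDEV_CMD_DEV_GET, NETDEV_FAMILY_VERSION);
        nla_put_u32(msg, NETDEV_A_DEV_IFINDEX, if_nametoindex(argv[1]));

        err = nl_send_auto(sk, msg);
        if (err >= 0)
                err = nl_recvmsgs_default(sk);

        nlmsg_free(msg);
        nl_socket_free(sk);
        return err < 0;
}

The dump handlers (netdev_nl_dev_get_dumpit(), netdev_nl_queue_get_dumpit(), netdev_nl_qstats_get_dumpit()) are reached the same way, but with NLM_F_DUMP set in the flags argument of genlmsg_put(). Assuming a kernel source tree is at hand, tools/net/ynl/cli.py used with Documentation/netlink/specs/netdev.yaml is a quicker way to exercise all of these commands.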
