// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

/* Caller holds RTNL or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
	return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	rcu_read_unlock();

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sysfs_emit(buf, format_string, READ_ONCE(dev->field));	\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	down_read(&dev_addr_sem);

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	rcu_read_unlock();

	up_read(&dev_addr_sem);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	int ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	rcu_read_unlock();
	return ret;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		/* Synchronize carrier state with link watch,
		 * see also rtnl_getlink().
		 */
		linkwatch_sync_dev(netdev);

		ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sysfs_emit(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sysfs_emit(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	operstate = READ_ONCE(netdev->operstate);
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sysfs_emit(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec,
			  atomic_read(&netdev->carrier_up_count) +
			  atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec,
			  atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	if (val > S32_MAX)
		return -ERANGE;

	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sysfs_emit(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sysfs_emit(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the trylock/restart below. This works
	 * because recurse is false when calling dev_get_port_parent_id.
	 */
	if (!netdev->netdev_ops->ndo_get_port_parent_id &&
	    !netdev->devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();

	if (dev_isalive(netdev))
		ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->threaded));

	rcu_read_unlock();

	return ret;
}

static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = dev_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	rcu_read_lock();
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	rcu_read_unlock();
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};

static bool wireless_group_needed(struct net_device *ndev)
{
#if IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		return true;
#endif
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	if (ndev->wireless_handlers)
		return true;
#endif
	return false;
}

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = sysfs_emit(buf, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue,
					cpumask_var_t mask)
{
	static DEFINE_MUTEX(rps_map_mutex);
	struct rps_map *old_map, *map;
	int cpu, i;

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);
	return 0;
}

int rps_cpumask_housekeeping(struct cpumask *mask)
{
	if (!cpumask_empty(mask)) {
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ));
		if (cpumask_empty(mask))
			return -EINVAL;
	}
	return 0;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err)
		goto out;

	err = rps_cpumask_housekeeping(mask);
	if (err)
		goto out;

	err = netdev_rx_queue_set_rps_mask(queue, mask);

out:
	free_cpumask_var(mask);
	return err ? : len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sysfs_emit(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	netdev_put(queue->dev, &queue->dev_tracker);
}

static const void *rx_queue_namespace(const struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(const struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static const struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_default_mask(struct net_device *dev,
				 struct netdev_rx_queue *queue)
{
#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
	struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask);

	if (rps_default_mask && !cpumask_empty(rps_default_mask))
		return netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
#endif
	return 0;
}

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	error = rx_queue_default_mask(dev, queue);
	if (error)
		goto err;

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);

	return sysfs_emit(buf, fmt_ulong, trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc;
	int index;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!rtnl_trylock())
		return restart_syscall();

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return num_tc < 0 ? sysfs_emit(buf, "%d%d\n", tc, num_tc) :
			    sysfs_emit(buf, "%d\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sysfs_emit(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* The check is also done later; this helps returning early without
	 * hitting the trylock/restart below.
	 */
	if (!dev->netdev_ops->ndo_set_tx_maxrate)
		return -EOPNOTSUPP;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
}

static ssize_t bql_set_stall_thrs(struct netdev_queue *queue,
				  const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	value = msecs_to_jiffies(value);
	if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
		return -ERANGE;

	if (!dql->stall_thrs && value)
		dql->last_reap = jiffies;
	/* Force last_reap to be live */
	smp_wmb();
	dql->stall_thrs = value;

	return len;
}

static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
	__ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);

static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf)
{
	return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
}

static ssize_t bql_set_stall_max(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	WRITE_ONCE(queue->dql.stall_max, 0);
	return len;
}

static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
	__ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);

static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%lu\n", dql->stall_cnt);
}

static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
	__ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	&bql_stall_thrs_attribute.attr,
	&bql_stall_cnt_attribute.attr,
	&bql_stall_max_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#else
/* Fake declaration, all the code using it should be dead */
static const struct attribute_group dql_group = {};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
			      int tc, char *buf, enum xps_map_type type)
{
	struct xps_dev_maps *dev_maps;
	unsigned long *mask;
	unsigned int nr_ids;
	int j, len;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps[type]);

	/* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0
	 * when dev_maps hasn't been allocated yet, to be backward compatible.
	 */
	nr_ids = dev_maps ? dev_maps->nr_ids :
		 (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);

	mask = bitmap_zalloc(nr_ids, GFP_NOWAIT);
	if (!mask) {
		rcu_read_unlock();
		return -ENOMEM;
	}

	if (!dev_maps || tc >= dev_maps->num_tc)
		goto out_no_maps;

	for (j = 0; j < nr_ids; j++) {
		int i, tci = j * dev_maps->num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				__set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
	bitmap_free(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int len, tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	if (!rtnl_trylock())
		return restart_syscall();

	/* If queue belongs to subordinate dev use its map */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0) {
		rtnl_unlock();
		return -EINVAL;
	}

	/* Make sure the subordinate device can't be freed */
	get_device(&dev->dev);
	rtnl_unlock();

	len = xps_queue_show(dev, index, tc, buf, XPS_CPUS);

	put_device(&dev->dev);
	return len;
}

static ssize_t xps_cpus_store(struct netdev_queue *queue,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	cpumask_var_t mask;
	int err;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!rtnl_trylock()) {
		free_cpumask_var(mask);
		return restart_syscall();
	}

	err = netif_set_xps_queue(dev, mask, index);
	rtnl_unlock();

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);

static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int tc;

	index = get_netdev_queue_index(queue);

	if (!rtnl_trylock())
		return restart_syscall();

	tc = netdev_txq_to_tc(dev, index);
	rtnl_unlock();
	if (tc < 0)
		return -EINVAL;

	return xps_queue_show(dev, index, tc, buf, XPS_RXQS);
}

static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask;
	unsigned int index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	if (!rtnl_trylock()) {
		bitmap_free(mask);
		return restart_syscall();
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
	cpus_read_unlock();

	rtnl_unlock();

	bitmap_free(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(netdev_queue_default);

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	netdev_put(queue->dev, &queue->dev_tracker);
}

static const void *netdev_queue_namespace(const struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(const struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static const struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_groups = netdev_queue_default_groups,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

static bool netdev_uses_bql(const struct net_device *dev)
{
	if (dev->features & NETIF_F_LLTX ||
	    dev->priv_flags & IFF_NO_QUEUE)
		return false;

	return IS_ENABLED(CONFIG_BQL);
}

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger netdev_queue_release call
	 * which decreases dev refcount: Take that reference here
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

	if (netdev_uses_bql(dev)) {
		error = sysfs_create_group(kobj, &dql_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err:
	kobject_put(kobj);
	return error;
}

static int tx_queue_change_owner(struct net_device *ndev, int index,
				 kuid_t kuid, kgid_t kgid)
{
	struct netdev_queue *queue = ndev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (netdev_uses_bql(ndev))
		error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	/* Tx queue kobjects are allowed to be updated when a device is being
	 * unregistered, but solely to remove queues from qdiscs. Any path
	 * adding queues should be fixed.
	 */
	WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num,
	     "New queues can't be registered after device unregistration.");

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->ns.count))
			queue->kobj.uevent_suppress = 1;

		if (netdev_uses_bql(dev))
			sysfs_remove_group(&queue->kobj, &dql_group);

		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int net_tx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

	for (i = 0; i < num; i++) {
		error = tx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
{
	int error = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	if (ndev->queues_kset) {
		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
		if (error)
			return error;
	}
	real_rx = ndev->real_num_rx_queues;
#endif
	real_tx = ndev->real_num_tx_queues;

	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
	if (error)
		return error;

	error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
	if (error)
		return error;

	return 0;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);

	dev->real_num_rx_queues = 0;
	dev->real_num_tx_queues = 0;
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env)
{
	const struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	kvfree(dev);
}

static const void *net_namespace(const struct device *d)
{
	const struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid)
{
	const struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

static const struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
	for (; dev; dev = dev->parent) {
		if (dev->of_node == data)
			return 1;
	}

	return 0;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->ns.count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

	if (wireless_group_needed(ndev))
		*groups++ = &wireless_group;
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

/* Change owner for sysfs entries when moving network devices across network
 * namespaces owned by different user namespaces.
 */
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
			const struct net *net_new)
{
	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
	struct device *dev = &ndev->dev;
	int error;

	net_ns_get_ownership(net_old, &old_uid, &old_gid);
	net_ns_get_ownership(net_new, &new_uid, &new_gid);

	/* The network namespace was changed but the owning user namespace is
	 * identical so there's no need to change the owner of sysfs entries.
	 */
	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
		return 0;

	error = device_change_owner(dev, new_uid, new_gid);
	if (error)
		return error;

	error = queue_change_owner(ndev, new_uid, new_gid);
	if (error)
		return error;

	return 0;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}