// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Linus Lüssing
 */

#include "multicast.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sprintf.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_mcast_mla_update(struct work_struct *work);

/**
 * batadv_mcast_start_timer() - schedule the multicast periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/**
 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
 * @soft_iface: netdev struct of the mesh interface
 *
 * If the given soft interface has a bridge on top then the refcount
 * of the according net device is increased.
 *
 * Return: NULL if no such bridge exists. Otherwise the net device of the
 * bridge.
 */
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
	struct net_device *upper = soft_iface;

	rcu_read_lock();
	do {
		upper = netdev_master_upper_dev_get_rcu(upper);
	} while (upper && !netif_is_bridge_master(upper));

	dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
 *  node for IPv4
 * @dev: the interface to check
 *
 * Checks the presence of an IPv4 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (in_dev && IN_DEV_MFORWARD(in_dev))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR4;
}

/**
 * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
 *  node for IPv6
 * @dev: the interface to check
 *
 * Checks the presence of an IPv6 multicast router on this node.
 *
 * Caller needs to hold rcu read lock.
 *
 * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = __in6_dev_get(dev);

	if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
		return BATADV_NO_FLAGS;
	else
		return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
	return BATADV_MCAST_WANT_NO_RTR6;
}
#endif

/**
 * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	rcu_read_lock();

	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);

	rcu_read_unlock();

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
						struct net_device *bridge)
{
	struct net_device *dev = bat_priv->soft_iface;
	u8 flags = BATADV_NO_FLAGS;

	if (!bridge)
		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
		flags |= BATADV_MCAST_WANT_NO_RTR4;
	if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
		flags |= BATADV_MCAST_WANT_NO_RTR6;

	return flags;
}

/**
 * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
 * @bat_priv: the bat priv with all the soft interface information
 * @bridge: bridge interface on top of the soft_iface if present,
 *  otherwise pass NULL
 *
 * Checks the presence of IPv4 and IPv6 multicast routers on this
 * node or behind its bridge.
 *
 * Return:
 *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
 *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
 *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
 *	The former two OR'd: no multicast router is present
 */
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
					 struct net_device *bridge)
{
	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;

	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);

	return flags;
}

/**
 * batadv_mcast_mla_forw_flags_get() - get multicast forwarding flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Checks if all active hard interfaces have an MTU larger or equal to 1280
 * bytes (IPv6 minimum MTU).
 *
 * Return: BATADV_MCAST_HAVE_MC_PTYPE_CAPA if yes, BATADV_NO_FLAGS otherwise.
 */
static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (hard_iface->net_dev->mtu < IPV6_MIN_MTU) {
			rcu_read_unlock();
			return BATADV_NO_FLAGS;
		}
	}
	rcu_read_unlock();

	return BATADV_MCAST_HAVE_MC_PTYPE_CAPA;
}
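
/* Example: with two active hard interfaces of MTU 1500 and 1280 the node
 * advertises BATADV_MCAST_HAVE_MC_PTYPE_CAPA; if any active interface
 * (say, one configured with an MTU of 1200) falls below IPV6_MIN_MTU, the
 * capability is withheld and batman-adv multicast packet forwarding is
 * not offered by this node.
 */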

/**
 * batadv_mcast_mla_flags_get() - get the new multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: A set of flags for the current/next TVLV, querier and
 * bridge state.
 */
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
	struct net_device *dev = bat_priv->soft_iface;
	struct batadv_mcast_querier_state *qr4, *qr6;
	struct batadv_mcast_mla_flags mla_flags;
	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(dev);

	memset(&mla_flags, 0, sizeof(mla_flags));
	mla_flags.enabled = 1;
	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
							       bridge);
	mla_flags.tvlv_flags |= batadv_mcast_mla_forw_flags_get(bat_priv);

	if (!bridge)
		return mla_flags;

	dev_put(bridge);

	mla_flags.bridged = 1;
	qr4 = &mla_flags.querier_ipv4;
	qr6 = &mla_flags.querier_ipv6;

	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");

	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);

	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);

	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

	/* 1) If no querier exists at all, then multicast listeners on
	 *    our local TT clients behind the bridge will keep silent.
	 * 2) If the selected querier is on one of our local TT clients,
	 *    behind the bridge, then this querier might shadow multicast
	 *    listeners on our local TT clients, behind this bridge.
	 *
	 * In both cases, we will signal other batman nodes that
	 * we need all multicast traffic of the according protocol.
	 */
	if (!qr4->exists || qr4->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
	}

	if (!qr6->exists || qr6->shadowing) {
		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
	}

	return mla_flags;
}

/**
 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
 * @mcast_addr: the multicast address to check
 * @mcast_list: the list with multicast addresses to search in
 *
 * Return: true if the given address is already in the given list.
 * Otherwise returns false.
 */
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
					  struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;

	hlist_for_each_entry(mcast_entry, mcast_list, list)
		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
			return true;

	return false;
}

/**
 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv4 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct in_device *in_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ip_mc_list *pmc;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
		return 0;

	rcu_read_lock();

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
	     pmc = rcu_dereference(pmc->next_rcu)) {
		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
		    !ipv4_is_local_multicast(pmc->multiaddr))
			continue;

		ip_eth_mc_map(pmc->multiaddr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
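
/* The two skip-checks in the loop above mirror the announced TVLV flags:
 * groups in 224.0.0.0/24 are left out when BATADV_MCAST_WANT_ALL_UNSNOOPABLES
 * is set, because they are delivered via the unsnoopables flooding anyway,
 * and routable groups are left out when BATADV_MCAST_WANT_NO_RTR4 is unset
 * (a multicast router was detected), because other nodes then forward all
 * routable IPv4 multicast to us via the want-all-rtr4 mechanism instead of
 * per-group TT announcements.
 */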

/**
 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of IPv6 multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	struct batadv_hw_addr *new;
	struct inet6_dev *in6_dev;
	u8 mcast_addr[ETH_ALEN];
	struct ifmcaddr6 *pmc6;
	int ret = 0;

	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
		return 0;

	rcu_read_lock();

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev) {
		rcu_read_unlock();
		return 0;
	}

	for (pmc6 = rcu_dereference(in6_dev->mc_list);
	     pmc6;
	     pmc6 = rcu_dereference(pmc6->next)) {
		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
			continue;

		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
		    IPV6_ADDR_SCOPE_LINKLOCAL)
			continue;

		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);

		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
		ret++;
	}
	rcu_read_unlock();

	return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
				 struct hlist_head *mcast_list,
				 struct batadv_mcast_mla_flags *flags)
{
	return 0;
}
#endif

/**
 * batadv_mcast_mla_softif_get() - get softif multicast listeners
 * @dev: the device to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on this kernel on the given soft interface, dev, in
 * the given mcast_list. In general, multicast listeners provided by
 * your multicast receiving applications run directly on this node.
 *
 * If there is a bridge interface on top of dev, collect from that one
 * instead. Just like with IP addresses and routes, multicast listeners
 * will(/should) register to the bridge interface instead of an
 * enslaved bat0.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
			    struct hlist_head *mcast_list,
			    struct batadv_mcast_mla_flags *flags)
{
	struct net_device *bridge = batadv_mcast_get_bridge(dev);
	int ret4, ret6 = 0;

	if (bridge)
		dev = bridge;

	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
	if (ret4 < 0)
		goto out;

	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
	if (ret6 < 0) {
		ret4 = 0;
		goto out;
	}

out:
	dev_put(bridge);

	return ret4 + ret6;
}

/**
 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
 * @dst: destination to write to - a multicast MAC address
 * @src: source to read from - a multicast IP address
 *
 * Converts a given multicast IPv4/IPv6 address from a bridge
 * to its matching multicast MAC address and copies it into the given
 * destination buffer.
 *
 * Caller needs to make sure the destination buffer can hold
 * at least ETH_ALEN bytes.
 */
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
	if (src->proto == htons(ETH_P_IP))
		ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
	else if (src->proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
	else
		eth_zero_addr(dst);
}

/**
 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
 * @dev: a bridge slave whose bridge to collect multicast addresses from
 * @mcast_list: a list to put found addresses into
 * @flags: flags indicating the new multicast state
 *
 * Collects multicast addresses of multicast listeners residing
 * on foreign, non-mesh devices which we gave access to our mesh via
 * a bridge on top of the given soft interface, dev, in the given
 * mcast_list.
 *
 * Return: -ENOMEM on memory allocation error or the number of
 * items added to the mcast_list otherwise.
 */
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
				       struct hlist_head *mcast_list,
				       struct batadv_mcast_mla_flags *flags)
{
	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
	struct br_ip_list *br_ip_entry, *tmp;
	u8 tvlv_flags = flags->tvlv_flags;
	struct batadv_hw_addr *new;
	u8 mcast_addr[ETH_ALEN];
	int ret;

	/* we don't need to detect these devices/listeners, the IGMP/MLD
	 * snooping code of the Linux bridge already does that for us
	 */
	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
				continue;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
				continue;

			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
				continue;

			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
			    IPV6_ADDR_SCOPE_LINKLOCAL)
				continue;
		}
#endif

		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
			continue;

		new = kmalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ret = -ENOMEM;
			break;
		}

		ether_addr_copy(new->addr, mcast_addr);
		hlist_add_head(&new->list, mcast_list);
	}

out:
	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
		list_del(&br_ip_entry->list);
		kfree(br_ip_entry);
	}

	return ret;
}

/**
 * batadv_mcast_mla_list_free() - free a list of multicast addresses
 * @mcast_list: the list to free
 *
 * Removes and frees all items in the given mcast_list.
 */
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which should _not_ be removed
 *
 * Retracts the announcement of any multicast listener from the
 * translation table except the ones listed in the given mcast_list.
 *
 * If mcast_list is NULL then all are retracted.
 */
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
					struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
				  list) {
		if (mcast_list &&
		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  mcast_list))
			continue;

		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
				       BATADV_NO_FLAGS,
				       "mcast TT outdated", false);

		hlist_del(&mcast_entry->list);
		kfree(mcast_entry);
	}
}

/**
 * batadv_mcast_mla_tt_add() - add multicast listener announcements
 * @bat_priv: the bat priv with all the soft interface information
 * @mcast_list: a list of addresses which are going to get added
 *
 * Adds multicast listener announcements from the given mcast_list to the
 * translation table if they have not been added yet.
 */
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
				    struct hlist_head *mcast_list)
{
	struct batadv_hw_addr *mcast_entry;
	struct hlist_node *tmp;

	if (!mcast_list)
		return;

	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
						  &bat_priv->mcast.mla_list))
			continue;

		if (!batadv_tt_local_add(bat_priv->soft_iface,
					 mcast_entry->addr, BATADV_NO_FLAGS,
					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
			continue;

		hlist_del(&mcast_entry->list);
		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
	}
}

/**
 * batadv_mcast_querier_log() - debug output regarding the querier status on
 *  link
 * @bat_priv: the bat priv with all the soft interface information
 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
 * @old_state: the previous querier state on our link
 * @new_state: the new querier state on our link
 *
 * Outputs debug messages to the logging facility with log level 'mcast'
 * regarding changes to the querier status on the link which are relevant
 * to our multicast optimizations.
 *
 * Usually this is about whether a querier appeared or vanished in
 * our mesh or whether the querier is in the suboptimal position of being
 * behind our local bridge segment: Snooping switches will directly
 * forward listener reports to the querier, therefore batman-adv and
 * the bridge will potentially not see these listeners - the querier is
 * potentially shadowing listeners from us then.
 *
 * This is only interesting for nodes with a bridge on top of their
 * soft interface.
 */
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
			 struct batadv_mcast_querier_state *old_state,
			 struct batadv_mcast_querier_state *new_state)
{
	if (!old_state->exists && new_state->exists)
		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
			    str_proto);
	else if (old_state->exists && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "%s Querier disappeared - multicast optimizations disabled\n",
			    str_proto);
	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
		batadv_info(bat_priv->soft_iface,
			    "No %s Querier present - multicast optimizations disabled\n",
			    str_proto);

	if (new_state->exists) {
		if ((!old_state->shadowing && new_state->shadowing) ||
		    (!old_state->exists && new_state->shadowing))
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
				   str_proto);
		else if (old_state->shadowing && !new_state->shadowing)
			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
				   "%s Querier is not behind our bridged segment\n",
				   str_proto);
	}
}

/**
 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
 *  setups
 * @bat_priv: the bat priv with all the soft interface information
 * @new_flags: flags indicating the new multicast state
 *
 * If no bridges are ever used on this node, then this function does nothing.
 *
 * Otherwise this function outputs debug information to the 'mcast' log level
 * which might be relevant to our multicast optimizations.
 *
 * More precisely, it outputs information when a bridge interface is added or
 * removed from a soft interface. And when a bridge is present, it further
 * outputs information about the querier state which is relevant for the
 * multicast flags this node is going to set.
 */
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
			struct batadv_mcast_mla_flags *new_flags)
{
	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;

	if (!old_flags->bridged && new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge added: Setting Unsnoopables(U)-flag\n");
	else if (old_flags->bridged && !new_flags->bridged)
		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");

	if (new_flags->bridged) {
		batadv_mcast_querier_log(bat_priv, "IGMP",
					 &old_flags->querier_ipv4,
					 &new_flags->querier_ipv4);
		batadv_mcast_querier_log(bat_priv, "MLD",
					 &old_flags->querier_ipv6,
					 &new_flags->querier_ipv6);
	}
}

/**
 * batadv_mcast_flags_log() - output debug information about mcast flag changes
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: TVLV flags indicating the new multicast state
 *
 * Whenever the multicast TVLV flags this node announces change, this function
 * should be used to notify userspace about the change.
 */
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
	char str_old_flags[] = "[.... . .]";

	sprintf(str_old_flags, "[%c%c%c%s%s%c]",
		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ",
		!(old_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) ? 'P' : '.');

	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
		   "Changing multicast flags from '%s' to '[%c%c%c%s%s%c]'\n",
		   old_enabled ? str_old_flags : "<undefined>",
		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ",
		   !(flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) ? 'P' : '.');
}
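
/* For illustration: a bridged node with working queriers for both protocols,
 * a multicast router reachable for IPv4 and IPv6 and no batman-adv multicast
 * packet support would be shown as "[U..R4R6P]". 'U' flags the need for
 * unsnoopable traffic, the dots mean the want-all-IPv4/IPv6 flags are unset,
 * "R4"/"R6" mean routable IPv4/IPv6 multicast still has to be forwarded to
 * this node and 'P' marks the missing multicast packet type capability.
 */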

/**
 * batadv_mcast_mla_flags_update() - update multicast flags
 * @bat_priv: the bat priv with all the soft interface information
 * @flags: flags indicating the new multicast state
 *
 * Updates the own multicast tvlv with our current multicast related settings,
 * capabilities and inabilities.
 */
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
			      struct batadv_mcast_mla_flags *flags)
{
	struct batadv_tvlv_mcast_data mcast_data;

	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
		return;

	batadv_mcast_bridge_log(bat_priv, flags);
	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);

	mcast_data.flags = flags->tvlv_flags;
	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));

	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
				       &mcast_data, sizeof(mcast_data));

	bat_priv->mcast.mla_flags = *flags;
}

/**
 * __batadv_mcast_mla_update() - update the own MLAs
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
 * ensured by the non-parallel execution of the worker this function
 * belongs to.
 */
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
	struct net_device *soft_iface = bat_priv->soft_iface;
	struct hlist_head mcast_list = HLIST_HEAD_INIT;
	struct batadv_mcast_mla_flags flags;
	int ret;

	flags = batadv_mcast_mla_flags_get(bat_priv);

	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
	if (ret < 0)
		goto out;

	spin_lock(&bat_priv->mcast.mla_lock);
	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
	batadv_mcast_mla_flags_update(bat_priv, &flags);
	spin_unlock(&bat_priv->mcast.mla_lock);

out:
	batadv_mcast_mla_list_free(&mcast_list);
}

/**
 * batadv_mcast_mla_update() - update the own MLAs
 * @work: kernel work struct
 *
 * Updates the own multicast listener announcements in the translation
 * table as well as the own, announced multicast tvlv container.
 *
 * In the end, reschedules the work timer.
 */
static void batadv_mcast_mla_update(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_mcast *priv_mcast;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

	__batadv_mcast_mla_update(bat_priv);
	batadv_mcast_start_timer(bat_priv);
}

/**
 * batadv_mcast_is_report_ipv4() - check for IGMP reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid IGMP report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
	if (ip_mc_check_igmp(skb) < 0)
		return false;

	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv4 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv4 packet has the potential to be forwarded with
 * a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
 * allocation failure.
 */
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct iphdr *iphdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv4(skb))
		return -EINVAL;

	iphdr = ip_hdr(skb);

	/* link-local multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 2.1.2.2)
	 */
	if (ipv4_is_local_multicast(iphdr->daddr))
		*is_unsnoopable = true;
	else
		*is_routable = ETH_P_IP;

	return 0;
}

/**
 * batadv_mcast_is_report_ipv6() - check for MLD reports
 * @skb: the ethernet frame destined for the mesh
 *
 * This call might reallocate skb data.
 *
 * Checks whether the given frame is a valid MLD report.
 *
 * Return: If so then true, otherwise false.
 */
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
	if (ipv6_mc_check_mld(skb) < 0)
		return false;

	switch (icmp6_hdr(skb)->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MLD2_REPORT:
		return true;
	}

	return false;
}

/**
 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
 *  potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the IPv6 packet to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given IPv6 packet has the potential to be forwarded with
 * a mode more optimal than classic flooding.
 *
 * Return: If so then 0.
 * Otherwise -EINVAL, or -ENOMEM if we are out of memory.
 */
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
					     struct sk_buff *skb,
					     bool *is_unsnoopable,
					     int *is_routable)
{
	struct ipv6hdr *ip6hdr;

	/* We might fail due to out-of-memory -> drop it */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
		return -ENOMEM;

	if (batadv_mcast_is_report_ipv6(skb))
		return -EINVAL;

	ip6hdr = ipv6_hdr(skb);

	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return -EINVAL;

	/* link-local-all-nodes multicast listeners behind a bridge are
	 * not snoopable (see RFC4541, section 3, paragraph 3)
	 */
	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
		*is_unsnoopable = true;
	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
		*is_routable = ETH_P_IPV6;

	return 0;
}

/**
 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast frame to check
 * @is_unsnoopable: stores whether the destination is snoopable
 * @is_routable: stores whether the destination is routable
 *
 * Checks whether the given multicast ethernet frame has the potential to be
 * forwarded with a mode more optimal than classic flooding.
 *
 * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of
 * memory.
 */
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
					struct sk_buff *skb,
					bool *is_unsnoopable,
					int *is_routable)
{
	struct ethhdr *ethhdr = eth_hdr(skb);

	if (!atomic_read(&bat_priv->multicast_mode))
		return -EINVAL;

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	case ETH_P_IPV6:
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;

		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
							 is_unsnoopable,
							 is_routable);
	default:
		return -EINVAL;
	}
}

/**
 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
 *  interest
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: ethernet header of a packet
 *
 * Return: the number of nodes which want all IPv4 multicast traffic if the
 * given ethhdr is from an IPv4 packet or the number of nodes which want all
 * IPv6 traffic if it matches an IPv6 packet.
 */
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
					       struct ethhdr *ethhdr)
{
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
	default:
		/* we shouldn't be here... */
		return 0;
	}
}

/**
 * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
 * @bat_priv: the bat priv with all the soft interface information
 * @protocol: the ethernet protocol type to count multicast routers for
 *
 * Return: the number of nodes which want all routable IPv4 multicast traffic
 * if the protocol is ETH_P_IP or the number of nodes which want all routable
 * IPv6 traffic if the protocol is ETH_P_IPV6.
 * Otherwise returns 0.
 */
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
				       int protocol)
{
	switch (protocol) {
	case ETH_P_IP:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
	case ETH_P_IPV6:
		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
	default:
		return 0;
	}
}

/**
 * batadv_mcast_forw_mode_by_count() - get forwarding mode by count
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to check
 * @vid: the vlan identifier
 * @is_routable: stores whether the destination is routable
 * @count: the number of originators the multicast packet needs to be sent to
 *
 * For a multicast packet with multiple destination originators, checks which
 * mode to use. For BATADV_FORW_MCAST it also encapsulates the packet with a
 * complete batman-adv multicast header.
 *
 * Return:
 *	BATADV_FORW_MCAST: If all nodes have multicast packet routing
 *	capabilities and an MTU >= 1280 on all hard interfaces (including us)
 *	and the encapsulated multicast packet with all destination addresses
 *	would still fit into an 1280 bytes batman-adv multicast packet
 *	(excluding the outer ethernet frame) and we could successfully push
 *	the full batman-adv multicast packet header.
 *	BATADV_FORW_UCASTS: If the packet cannot be sent in a batman-adv
 *	multicast packet and the amount of batman-adv unicast packets needed
 *	is smaller or equal to the configured multicast fanout.
 *	BATADV_FORW_BCAST: Otherwise.
 */
static enum batadv_forw_mode
batadv_mcast_forw_mode_by_count(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid,
				int is_routable, int count)
{
	unsigned int mcast_hdrlen = batadv_mcast_forw_packet_hdrlen(count);
	u8 own_tvlv_flags = bat_priv->mcast.mla_flags.tvlv_flags;

	if (!atomic_read(&bat_priv->mcast.num_no_mc_ptype_capa) &&
	    own_tvlv_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA &&
	    skb->len + mcast_hdrlen <= IPV6_MIN_MTU &&
	    batadv_mcast_forw_push(bat_priv, skb, vid, is_routable, count))
		return BATADV_FORW_MCAST;

	if (count <= atomic_read(&bat_priv->multicast_fanout))
		return BATADV_FORW_UCASTS;

	return BATADV_FORW_BCAST;
}
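
/* For illustration, assuming a hypothetical setup: with three destination
 * originators, a 200 byte frame and every node announcing
 * BATADV_MCAST_HAVE_MC_PTYPE_CAPA, the encapsulated packet stays well below
 * the 1280 byte limit, so a single batman-adv multicast packet is used. If
 * one node lacked the capability, the same frame would instead be sent as
 * three unicast copies, provided the configured multicast_fanout is at
 * least three; otherwise it would fall back to classic flooding.
 */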

/**
 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to check
 * @vid: the vlan identifier
 * @is_routable: stores whether the destination is routable
 *
 * Return: The forwarding mode as enum batadv_forw_mode.
 */
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
		       unsigned short vid, int *is_routable)
{
	int ret, tt_count, ip_count, unsnoop_count, total_count;
	bool is_unsnoopable = false;
	struct ethhdr *ethhdr;
	int rtr_count = 0;

	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
					   is_routable);
	if (ret == -ENOMEM)
		return BATADV_FORW_NONE;
	else if (ret < 0)
		return BATADV_FORW_BCAST;

	ethhdr = eth_hdr(skb);

	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
					       BATADV_NO_FLAGS);
	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
	unsnoop_count = !is_unsnoopable ? 0 :
			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);

	total_count = tt_count + ip_count + unsnoop_count + rtr_count;

	if (!total_count)
		return BATADV_FORW_NONE;
	else if (unsnoop_count)
		return BATADV_FORW_BCAST;

	return batadv_mcast_forw_mode_by_count(bat_priv, skb, vid, *is_routable,
					       total_count);
}

/**
 * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to send
 * @vid: the vlan identifier
 * @orig_node: the originator to send the packet to
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
				       struct sk_buff *skb,
				       unsigned short vid,
				       struct batadv_orig_node *orig_node)
{
	/* Avoid sending multicast-in-unicast packets to other BLA
	 * gateways - they already got the frame from the LAN side
	 * we share with them.
	 * TODO: Refactor to take BLA into account earlier, to avoid
	 * reducing the mcast_fanout count.
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

/**
 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any multicast
 * listener registered in the translation table. A transmission is performed
 * via a batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
		     unsigned short vid)
{
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_global_entry *tt_global;
	const u8 *addr = eth_hdr(skb)->h_dest;

	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
	if (!tt_global)
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
					    orig_entry->orig_node);
	}
	rcu_read_unlock();

	batadv_tt_global_entry_put(tt_global);

out:
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv4_list,
				 mcast_want_all_ipv4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_ipv6_list,
				 mcast_want_all_ipv6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr4_list,
				 mcast_want_all_rtr4_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
 * batman-adv unicast packet for each such destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
 * otherwise.
 */
static int
batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
				struct sk_buff *skb, unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret = NET_XMIT_SUCCESS;
	struct sk_buff *newskb;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node,
				 &bat_priv->mcast.want_all_rtr6_list,
				 mcast_want_all_rtr6_node) {
		newskb = skb_copy(skb, GFP_ATOMIC);
		if (!newskb) {
			ret = NET_XMIT_DROP;
			break;
		}

		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 *
 * Sends copies of a frame with multicast destination to any node with a
 * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
 * transmission is performed via a batman-adv unicast packet for each such
 * destination node.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
static int
batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
			   struct sk_buff *skb, unsigned short vid)
{
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
	case ETH_P_IPV6:
		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
	default:
		/* we shouldn't be here... */
		return NET_XMIT_DROP;
	}
}

/**
 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
 * @is_routable: stores whether the destination is routable
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
 * want-all flags. A transmission is performed via a batman-adv unicast packet
 * for each such destination node.
 *
 * The given skb is consumed/freed.
 *
 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid, int is_routable)
{
	int ret;

	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

	if (!is_routable)
		goto skip_mc_router;

	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
	if (ret != NET_XMIT_SUCCESS) {
		kfree_skb(skb);
		return ret;
	}

skip_mc_router:
	consume_skb(skb);
	return ret;
}

/**
 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
 * orig, has toggled then this method updates the counter and the list
 * accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
					     struct batadv_orig_node *orig,
					     u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}

/**
 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node which multicast state might have changed of
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig,
					  u8 mcast_flags)
{
	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;

	lockdep_assert_held(&orig->mcast_handler_lock);

	/* switched from flag unset to set */
	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(!hlist_unhashed(node));

		hlist_add_head_rcu(node, head);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	/* switched from flag set to unset */
	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);

		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
		/* flag checks above + mcast_handler_lock prevents this */
		WARN_ON(hlist_unhashed(node));

		hlist_del_init_rcu(node);
		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
	}
}
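
/* Note on the two helpers below: BATADV_MCAST_WANT_NO_RTR4/6 have inverted
 * semantics compared to the want-all flags above. An originator is added to
 * the want-all-rtr list (and the counter increased) when the flag switches
 * from set to unset, i.e. when that node starts asking for routable
 * multicast traffic again.
 */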
/**
 * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          u8 mcast_flags)
{
        struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
        struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;

        lockdep_assert_held(&orig->mcast_handler_lock);

        /* switched from flag set to unset */
        if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
            orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
                atomic_inc(&bat_priv->mcast.num_want_all_rtr4);

                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
                /* flag checks above + mcast_handler_lock prevents this */
                WARN_ON(!hlist_unhashed(node));

                hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag unset to set */
        } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
                   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
                atomic_dec(&bat_priv->mcast.num_want_all_rtr4);

                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
                /* flag checks above + mcast_handler_lock prevents this */
                WARN_ON(hlist_unhashed(node));

                hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
}
/**
 * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
 * toggled then this method updates the counter and the list accordingly.
 *
 * Caller needs to hold orig->mcast_handler_lock.
 */
static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          u8 mcast_flags)
{
        struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
        struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;

        lockdep_assert_held(&orig->mcast_handler_lock);

        /* switched from flag set to unset */
        if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
            orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
                atomic_inc(&bat_priv->mcast.num_want_all_rtr6);

                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
                /* flag checks above + mcast_handler_lock prevents this */
                WARN_ON(!hlist_unhashed(node));

                hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag unset to set */
        } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
                   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
                atomic_dec(&bat_priv->mcast.num_want_all_rtr6);

                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
                /* flag checks above + mcast_handler_lock prevents this */
                WARN_ON(hlist_unhashed(node));

                hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
}

/**
 * batadv_mcast_have_mc_ptype_update() - update multicast packet type counter
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node whose multicast state might have changed
 * @mcast_flags: flags indicating the new multicast state
 *
 * If the BATADV_MCAST_HAVE_MC_PTYPE_CAPA flag of this originator, orig, has
 * toggled then this method updates the counter accordingly.
 */
static void batadv_mcast_have_mc_ptype_update(struct batadv_priv *bat_priv,
                                              struct batadv_orig_node *orig,
                                              u8 mcast_flags)
{
        lockdep_assert_held(&orig->mcast_handler_lock);

        /* switched from flag set to unset */
        if (!(mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA) &&
            orig->mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA)
                atomic_inc(&bat_priv->mcast.num_no_mc_ptype_capa);
        /* switched from flag unset to set */
        else if (mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA &&
                 !(orig->mcast_flags & BATADV_MCAST_HAVE_MC_PTYPE_CAPA))
                atomic_dec(&bat_priv->mcast.num_no_mc_ptype_capa);
}

/**
 * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
 * @enabled: whether the originator has multicast TVLV support enabled
 * @tvlv_value: tvlv buffer containing the multicast flags
 * @tvlv_value_len: tvlv buffer length
 *
 * Return: multicast flags for the given tvlv buffer
 */
static u8
batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
{
        u8 mcast_flags = BATADV_NO_FLAGS;

        if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
                mcast_flags = *(u8 *)tvlv_value;

        if (!enabled) {
                mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
                mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
        }

        /* remove redundant flags to avoid sending duplicate packets later */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
                mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;

        if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
                mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;

        return mcast_flags;
}
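/* Two examples of the flag resolution above (illustrative values only):
 *
 *      enabled == false (no multicast TVLV seen from this originator):
 *      -> WANT_ALL_IPV4 | WANT_ALL_IPV6, folded to additionally include
 *         WANT_NO_RTR4 | WANT_NO_RTR6, which keeps the node off the rtr
 *         lists so it does not receive routable packets twice
 *
 *      enabled == true, TVLV announces WANT_ALL_IPV4 only:
 *      -> WANT_ALL_IPV4 | WANT_NO_RTR4, the IPv6 flags stay as announced
 */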
/**
 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the multicast data
 * @tvlv_value_len: tvlv buffer length
 */
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          u8 flags,
                                          void *tvlv_value,
                                          u16 tvlv_value_len)
{
        bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
        u8 mcast_flags;

        mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
                                                  tvlv_value, tvlv_value_len);

        spin_lock_bh(&orig->mcast_handler_lock);

        if (orig_mcast_enabled &&
            !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
                set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
        } else if (!orig_mcast_enabled &&
                   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
                clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
        }

        set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);

        batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
        batadv_mcast_have_mc_ptype_update(bat_priv, orig, mcast_flags);

        orig->mcast_flags = mcast_flags;
        spin_unlock_bh(&orig->mcast_handler_lock);
}

/**
 * batadv_mcast_init() - initialize the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
        batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
                                     NULL, NULL, BATADV_TVLV_MCAST, 2,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
        batadv_tvlv_handler_register(bat_priv, NULL, NULL,
                                     batadv_mcast_forw_tracker_tvlv_handler,
                                     BATADV_TVLV_MCAST_TRACKER, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

        INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
        batadv_mcast_start_timer(bat_priv);
}
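/* The TVLV type/version pairs registered above (BATADV_TVLV_MCAST, version 2
 * and BATADV_TVLV_MCAST_TRACKER, version 1) are the ones unregistered again
 * in batadv_mcast_free() further below.
 */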
/**
 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
 * @msg: buffer for the message
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 or error code.
 */
int batadv_mcast_mesh_info_put(struct sk_buff *msg,
                               struct batadv_priv *bat_priv)
{
        u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
        u32 flags_priv = BATADV_NO_FLAGS;

        if (bat_priv->mcast.mla_flags.bridged) {
                flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;

                if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
                        flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
                if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
                        flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
                if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
                        flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
                if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
                        flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
        }

        if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
            nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
                return -EMSGSIZE;

        return 0;
}

/**
 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @orig_node: originator to dump the multicast flags of
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
                              struct netlink_callback *cb,
                              struct batadv_orig_node *orig_node)
{
        void *hdr;

        hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
                          &batadv_netlink_family, NLM_F_MULTI,
                          BATADV_CMD_GET_MCAST_FLAGS);
        if (!hdr)
                return -ENOBUFS;

        genl_dump_check_consistent(cb, hdr);

        if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
                    orig_node->orig)) {
                genlmsg_cancel(msg, hdr);
                return -EMSGSIZE;
        }

        if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
                     &orig_node->capabilities)) {
                if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
                                orig_node->mcast_flags)) {
                        genlmsg_cancel(msg, hdr);
                        return -EMSGSIZE;
                }
        }

        genlmsg_end(msg, hdr);
        return 0;
}
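/* Dump consistency: batadv_mcast_flags_dump_bucket() below seeds cb->seq
 * from the originator hash generation counter, so that the
 * genl_dump_check_consistent() call above can mark later parts of a
 * multi-part dump with NLM_F_DUMP_INTR if the hash changes while the dump
 * is still in progress.
 */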
/**
 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
 *  table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
                               struct netlink_callback *cb,
                               struct batadv_hashtable *hash,
                               unsigned int bucket, long *idx_skip)
{
        struct batadv_orig_node *orig_node;
        long idx = 0;

        spin_lock_bh(&hash->list_locks[bucket]);
        cb->seq = atomic_read(&hash->generation) << 1 | 1;

        hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
                if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
                              &orig_node->capa_initialized))
                        continue;

                if (idx < *idx_skip)
                        goto skip;

                if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
                        spin_unlock_bh(&hash->list_locks[bucket]);
                        *idx_skip = idx;

                        return -EMSGSIZE;
                }

skip:
                idx++;
        }
        spin_unlock_bh(&hash->list_locks[bucket]);

        return 0;
}

/**
 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @bat_priv: the bat priv with all the soft interface information
 * @bucket: current bucket to dump
 * @idx: index in current bucket to the next entry to dump
 *
 * Return: message length.
 */
static int
__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
                          struct netlink_callback *cb,
                          struct batadv_priv *bat_priv, long *bucket, long *idx)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        long bucket_tmp = *bucket;
        long idx_tmp = *idx;

        while (bucket_tmp < hash->size) {
                if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
                                                   bucket_tmp, &idx_tmp))
                        break;

                bucket_tmp++;
                idx_tmp = 0;
        }

        *bucket = bucket_tmp;
        *idx = idx_tmp;

        return msg->len;
}

/**
 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
 *  callback
 * @cb: netlink callback structure
 * @primary_if: the primary interface pointer to return the result in
 *
 * Return: 0 or error code.
 */
static int
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
                                 struct batadv_hard_iface **primary_if)
{
        struct batadv_hard_iface *hard_iface = NULL;
        struct net *net = sock_net(cb->skb->sk);
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;
        int ifindex;
        int ret = 0;

        ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
        if (!ifindex)
                return -EINVAL;

        soft_iface = dev_get_by_index(net, ifindex);
        if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
                ret = -ENODEV;
                goto out;
        }

        bat_priv = netdev_priv(soft_iface);

        hard_iface = batadv_primary_if_get_selected(bat_priv);
        if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
                ret = -ENOENT;
                goto out;
        }

out:
        dev_put(soft_iface);

        if (!ret && primary_if)
                *primary_if = hard_iface;
        else
                batadv_hardif_put(hard_iface);

        return ret;
}
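/* Userspace retrieves the table below with a genetlink dump of
 * BATADV_CMD_GET_MCAST_FLAGS, selecting the mesh interface via
 * BATADV_ATTR_MESH_IFINDEX; cb->args[0] and cb->args[1] (hash bucket and
 * in-bucket index, see batadv_mcast_flags_dump()) carry the resume position
 * between the individual dump callbacks.
 */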
/**
 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
        struct batadv_hard_iface *primary_if = NULL;
        int portid = NETLINK_CB(cb->skb).portid;
        struct batadv_priv *bat_priv;
        long *bucket = &cb->args[0];
        long *idx = &cb->args[1];
        int ret;

        ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
        if (ret)
                return ret;

        bat_priv = netdev_priv(primary_if->soft_iface);
        ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);

        batadv_hardif_put(primary_if);
        return ret;
}

/**
 * batadv_mcast_free() - free the multicast optimizations structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
        cancel_delayed_work_sync(&bat_priv->mcast.work);

        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
        batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST_TRACKER, 1);
        batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);

        /* safely calling outside of worker, as worker was canceled above */
        batadv_mcast_mla_tt_retract(bat_priv, NULL);
}

/**
 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
 * @orig: the originator which is going to get purged
 */
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
        struct batadv_priv *bat_priv = orig->bat_priv;

        spin_lock_bh(&orig->mcast_handler_lock);

        batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_rtr4_update(bat_priv, orig,
                                      BATADV_MCAST_WANT_NO_RTR4);
        batadv_mcast_want_rtr6_update(bat_priv, orig,
                                      BATADV_MCAST_WANT_NO_RTR6);
        batadv_mcast_have_mc_ptype_update(bat_priv, orig,
                                          BATADV_MCAST_HAVE_MC_PTYPE_CAPA);

        spin_unlock_bh(&orig->mcast_handler_lock);
}