// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/genetlink.h>

#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

bool ncsi_channel_has_link(struct ncsi_channel *channel)
{
	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
}

bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
			  struct ncsi_channel *channel)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_PACKAGE(ndp, np)
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (nc == channel)
				continue;
			if (nc->state == NCSI_CHANNEL_ACTIVE &&
			    ncsi_channel_has_link(nc))
				return false;
		}

	return true;
}

static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (ncsi_channel_has_link(nc)) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}

static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled)
		return;	/* expected race disabling timer */
	if (WARN_ON_ONCE(chained))
		goto bad_state;

	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
bad_state:
		netdev_warn(ndp->ndev.dev,
			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
			    nc->id, state, chained ? "on" : "off");
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		break;
	default:
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		ncsi_report_link(ndp, true);
		ndp->flags |= NCSI_DEV_RESHUFFLE;

		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->monitor.timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);
	np->channel_whitelist = UINT_MAX;

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}

void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}

/* For two consecutive NCSI commands, the packet IDs shouldn't be the
 * same. Otherwise, a bogus response might be matched to the wrong
 * request. So the available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
					unsigned int req_flags)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check if there is an available request from the cursor up to the limit */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

	/* Fall back to checking from the starting cursor */
	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

found:
	spin_unlock_irqrestore(&ndp->lock, flags);
	return nr;
}

void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}

static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct ncsi_cmd_pkt *cmd;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* If the request already has an associated response,
	 * let the response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
		if (nr->cmd) {
			/* Find the package */
			cmd = (struct ncsi_cmd_pkt *)
			      skb_network_header(nr->cmd);
			ncsi_find_package_and_channel(ndp,
						      cmd->cmd.common.channel,
						      &np, &nc);
			ncsi_send_netlink_timeout(nr, np, nc);
		}
	}

	/* Release the request */
	ncsi_free_request(nr);
}

static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc, *tmp;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	np = ndp->active_package;
	nc = ndp->active_channel;
	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		fallthrough;
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* Retrieve the last link states of the channels in the current
		 * package when the active channel needs to fail over to
		 * another one, since we may select another channel as the next
		 * active one. The channels' link states are the most important
		 * factor in that selection, so we need accurate link states.
		 * Unfortunately, the link states of inactive channels can't be
		 * updated by LSC AENs in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		ndp->channel_probe_id++;

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_suspend_dcnt;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		NCSI_FOR_EACH_CHANNEL(np, tmp) {
			/* If there is another channel active on this package,
			 * do not deselect the package.
			 */
			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
				nd->state = ncsi_dev_state_suspend_done;
				break;
			}
		}
		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		if (ndp->flags & NCSI_DEV_RESET)
			ncsi_reset_dev(nd);
		else
			ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}

/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	unsigned long flags;
	void *bitmap;
	int index;
	u16 vid;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);
	index = find_first_bit(bitmap, ncf->n_vids);
	if (index >= ncf->n_vids) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}
	vid = ncf->vids[index];

	clear_bit(index, bitmap);
	ncf->vids[index] = 0;
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;
	return 0;
}

/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 * packet.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	struct vlan_vid *vlan = NULL;
	unsigned long flags;
	int i, index;
	void *bitmap;
	u16 vid;

	if (list_empty(&ndp->vlan_vids))
		return -1;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);

	rcu_read_lock();
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		vid = vlan->vid;
		for (i = 0; i < ncf->n_vids; i++)
			if (ncf->vids[i] == vid) {
				vid = 0;
				break;
			}
		if (vid)
			break;
	}
	rcu_read_unlock();

	if (!vid) {
		/* Every VLAN ID is already in the filter; nothing to set */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	index = find_first_zero_bit(bitmap, ncf->n_vids);
	if (index < 0 || index >= ncf->n_vids) {
		netdev_err(ndp->ndev.dev,
			   "Channel %u already has all VLAN filters set\n",
			   nc->id);
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	ncf->vids[index] = vid;
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}

static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;

	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);

	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;

	/* PHY Link up attribute */
	data[6] = 0x1;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

/* NCSI OEM Command APIs */
static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
	data[5] = NCSI_OEM_BCM_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;

	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
	       nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);

	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
	data[4] = NCSI_OEM_INTEL_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);

	return ret;
}

/* OEM Command handlers initialization */
static struct ncsi_oem_gma_handler {
	unsigned int mfr_id;
	int (*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
};

static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
{
	struct ncsi_oem_gma_handler *nch = NULL;
	int i;

	/* This function should only be called once, return if flag set */
	if (nca->ndp->gma_flag == 1)
		return -1;

	/* Find gma handler for given manufacturer id */
	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
			if (ncsi_oem_gma_handlers[i].handler)
				nch = &ncsi_oem_gma_handlers[i];
			break;
		}
	}

	if (!nch) {
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
			   mf_id);
		return -1;
	}

	/* Get Mac address from NCSI device */
	return nch->handler(nca);
}

/* Determine if a given channel from the channel_queue should be used for Tx */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
			       struct ncsi_channel *nc)
{
	struct ncsi_channel_mode *ncm;
	struct ncsi_channel *channel;
	struct ncsi_package *np;

	/* Check if any other channel has Tx enabled; a channel may have already
	 * been configured and removed from the channel queue.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!ndp->multi_package && np != nc->package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, channel) {
			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
			if (ncm->enable)
				return false;
		}
	}

	/* This channel is the preferred channel and has link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
		np = channel->package;
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			return np->preferred_channel == nc;
		}
	}

	/* This channel has link */
	if (ncsi_channel_has_link(nc))
		return true;

	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
		if (ncsi_channel_has_link(channel))
			return false;

	/* No other channel has link; default to this one */
	return true;
}

/* Change the active Tx channel in a multi-channel setup */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
			   struct ncsi_package *package,
			   struct ncsi_channel *disable,
			   struct ncsi_channel *enable)
{
	struct ncsi_cmd_arg nca;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	int ret = 0;

	if (!package->multi_channel && !ndp->multi_package)
		netdev_warn(ndp->ndev.dev,
			    "NCSI: Trying to update Tx channel in single-channel mode\n");
	nca.ndp = ndp;
	nca.req_flags = 0;

	/* Find current channel with Tx enabled */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (disable)
			break;
		if (!ndp->multi_package && np != package)
			continue;

		NCSI_FOR_EACH_CHANNEL(np, nc)
			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
				disable = nc;
				break;
			}
	}

	/* Find a suitable channel for Tx */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (enable)
			break;
		if (!ndp->multi_package && np != package)
			continue;
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;

		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			enable = np->preferred_channel;
			break;
		}

		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & 0x1 << nc->id))
				continue;
			if (nc->state != NCSI_CHANNEL_ACTIVE)
				continue;
			if (ncsi_channel_has_link(nc)) {
				enable = nc;
				break;
			}
		}
	}

	if (disable == enable)
		return -1;

	if (!enable)
		return -1;

	if (disable) {
		nca.channel = disable->id;
		nca.package = disable->package->id;
		nca.type = NCSI_PKT_CMD_DCNT;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev,
				   "Error %d sending DCNT\n",
				   ret);
	}

	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);

	nca.channel = enable->id;
	nca.package = enable->package->id;
	nca.type = NCSI_PKT_CMD_ECNT;
	ret = ncsi_xmit_cmd(&nca);
	if (ret)
		netdev_err(ndp->ndev.dev,
			   "Error %d sending ECNT\n",
			   ret);

	return ret;
}

static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
			  ? ncsi_dev_state_config_oem_gma
			  : ncsi_dev_state_config_clear_vids;
		break;
	case ncsi_dev_state_config_oem_gma:
		nd->state = ncsi_dev_state_config_clear_vids;

		nca.package = np->id;
		nca.channel = nc->id;
		ndp->pending_req_num = 1;
		if (nc->version.major >= 1 && nc->version.minor >= 2) {
			nca.type = NCSI_PKT_CMD_GMCMA;
			ret = ncsi_xmit_cmd(&nca);
		} else {
			nca.type = NCSI_PKT_CMD_OEM;
			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
		}
		if (ret < 0)
			schedule_work(&ndp->work);

		break;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
	case ncsi_dev_state_config_dgmf:
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
			/* Use first entry in unicast filter table. Note that
			 * the MAC filter table starts from entry 1 instead of
			 * 0.
			 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			/* If multicast global filtering is supported then
			 * disable it so that all multicast packets will be
			 * forwarded to the management controller
			 */
			if (nc->caps[NCSI_CAP_GENERIC].cap &
			    NCSI_CAP_GENERIC_MC)
				nd->state = ncsi_dev_state_config_dgmf;
			else if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_dgmf) {
			nca.type = NCSI_PKT_CMD_DGMF;
			if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			if (np->preferred_channel &&
			    nc != np->preferred_channel)
				netdev_info(ndp->ndev.dev,
					    "NCSI: Tx failed over to channel %u\n",
					    nc->id);
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
			   nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;

		if (ndp->flags & NCSI_DEV_RESET) {
			/* A reset event happened during config, start it now */
			nc->reconfigure_needed = false;
			spin_unlock_irqrestore(&nc->lock, flags);
			ncsi_reset_dev(nd);
			break;
		}

		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
		} else {
			hot_nc = NULL;
			netdev_dbg(ndp->ndev.dev,
				   "NCSI: channel %u link down after config\n",
				   nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}

static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags, cflags;
	struct ncsi_package *np;
	bool with_link;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* By default the search is done once an inactive channel with link
	 * up is found, unless a preferred channel is set.
	 * If multi_package or multi_channel are configured, all channels in
	 * the whitelist are added to the channel queue.
	 */
	found = NULL;
	with_link = false;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & (0x1 << nc->id)))
				continue;

			spin_lock_irqsave(&nc->lock, cflags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, cflags);
				continue;
			}

			if (!found)
				found = nc;

			if (nc == hot_nc)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				with_link = true;
			}

			/* If multi_channel is enabled configure all valid
			 * channels whether or not they currently have link
			 * so they will have AENs enabled.
			 */
			if (with_link || np->multi_channel) {
				spin_lock_irqsave(&ndp->lock, flags);
				list_add_tail_rcu(&nc->link,
						  &ndp->channel_queue);
				spin_unlock_irqrestore(&ndp->lock, flags);

				netdev_dbg(ndp->ndev.dev,
					   "NCSI: Channel %u added to queue (link %s)\n",
					   nc->id,
					   ncm->data[2] & 0x1 ? "up" : "down");
			}

			spin_unlock_irqrestore(&nc->lock, cflags);

			if (with_link && !np->multi_channel)
				break;
		}
		if (with_link && !ndp->multi_package)
			break;
	}

	if (list_empty(&ndp->channel_queue) && found) {
		netdev_info(ndp->ndev.dev,
			    "NCSI: No channel with link found, configuring channel %u\n",
			    found->id);
		spin_lock_irqsave(&ndp->lock, flags);
		list_add_tail_rcu(&found->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
	} else if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found to configure!\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;
	bool has_channel = false;

	/* Hardware arbitration is disabled if any channel doesn't
	 * explicitly support it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			has_channel = true;

			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	if (has_channel) {
		ndp->flags |= NCSI_DEV_HWA;
		return true;
	}

	ndp->flags &= ~NCSI_DEV_HWA;
	return false;
}

static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		fallthrough;
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		ndp->active_package = ncsi_find_package(ndp,
							ndp->package_probe_id);
		if (!ndp->active_package) {
			/* No response */
			nd->state = ncsi_dev_state_probe_dp;
			schedule_work(&ndp->work);
			break;
		}
		nd->state = ncsi_dev_state_probe_cis;
		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
		    ndp->mlx_multi_host)
			nd->state = ncsi_dev_state_probe_mlx_gma;

		schedule_work(&ndp->work);
		break;
	case ncsi_dev_state_probe_mlx_gma:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_gma_handler_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_mlx_smaf;
		break;
	case ncsi_dev_state_probe_mlx_smaf:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_smaf_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_keep_phy:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_keep_phy_intel(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_cis:
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = 1;

		/* Clear initial state; retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_cis)
			nca.type = NCSI_PKT_CMD_CIS;
		else if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		if (nd->state == ncsi_dev_state_probe_cis) {
			nd->state = ncsi_dev_state_probe_gvi;
			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
				nd->state = ncsi_dev_state_probe_keep_phy;
		} else if (nd->state == ncsi_dev_state_probe_gvi) {
			nd->state = ncsi_dev_state_probe_gc;
		} else if (nd->state == ncsi_dev_state_probe_gc) {
			nd->state = ncsi_dev_state_probe_gls;
		} else {
			nd->state = ncsi_dev_state_probe_cis;
			ndp->channel_probe_id++;
		}

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_probe_dp;
		}
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the current package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Probe next package */
		ndp->package_probe_id++;
		if (ndp->package_probe_id >= 8) {
			/* Probe finished */
			ndp->flags |= NCSI_DEV_PROBED;
			break;
		}
		nd->state = ncsi_dev_state_probe_package;
		ndp->active_package = NULL;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	if (ndp->flags & NCSI_DEV_PROBED) {
		/* Check if all packages have HWA support */
		ncsi_check_hwa(ndp);
		ncsi_choose_active_channel(ndp);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
						 struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			   nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n", 1578 nc->id); 1579 ncsi_suspend_channel(ndp); 1580 break; 1581 default: 1582 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n", 1583 old_state, nc->package->id, nc->id); 1584 ncsi_report_link(ndp, false); 1585 return -EINVAL; 1586 } 1587 1588 return 0; 1589 1590 out: 1591 ndp->active_channel = NULL; 1592 ndp->active_package = NULL; 1593 if (ndp->flags & NCSI_DEV_RESHUFFLE) { 1594 ndp->flags &= ~NCSI_DEV_RESHUFFLE; 1595 return ncsi_choose_active_channel(ndp); 1596 } 1597 1598 ncsi_report_link(ndp, false); 1599 return -ENODEV; 1600 } 1601 1602 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp) 1603 { 1604 struct ncsi_dev *nd = &ndp->ndev; 1605 struct ncsi_channel *nc; 1606 struct ncsi_package *np; 1607 unsigned long flags; 1608 unsigned int n = 0; 1609 1610 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1611 NCSI_FOR_EACH_CHANNEL(np, nc) { 1612 spin_lock_irqsave(&nc->lock, flags); 1613 1614 /* Channels may be busy, mark dirty instead of 1615 * kicking if; 1616 * a) not ACTIVE (configured) 1617 * b) in the channel_queue (to be configured) 1618 * c) it's ndev is in the config state 1619 */ 1620 if (nc->state != NCSI_CHANNEL_ACTIVE) { 1621 if ((ndp->ndev.state & 0xff00) == 1622 ncsi_dev_state_config || 1623 !list_empty(&nc->link)) { 1624 netdev_dbg(nd->dev, 1625 "NCSI: channel %p marked dirty\n", 1626 nc); 1627 nc->reconfigure_needed = true; 1628 } 1629 spin_unlock_irqrestore(&nc->lock, flags); 1630 continue; 1631 } 1632 1633 spin_unlock_irqrestore(&nc->lock, flags); 1634 1635 ncsi_stop_channel_monitor(nc); 1636 spin_lock_irqsave(&nc->lock, flags); 1637 nc->state = NCSI_CHANNEL_INACTIVE; 1638 spin_unlock_irqrestore(&nc->lock, flags); 1639 1640 spin_lock_irqsave(&ndp->lock, flags); 1641 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 1642 spin_unlock_irqrestore(&ndp->lock, flags); 1643 1644 netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc); 1645 n++; 1646 } 1647 } 1648 1649 return n; 1650 } 1651 1652 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1653 { 1654 struct ncsi_dev_priv *ndp; 1655 unsigned int n_vids = 0; 1656 struct vlan_vid *vlan; 1657 struct ncsi_dev *nd; 1658 bool found = false; 1659 1660 if (vid == 0) 1661 return 0; 1662 1663 nd = ncsi_find_dev(dev); 1664 if (!nd) { 1665 netdev_warn(dev, "NCSI: No net_device?\n"); 1666 return 0; 1667 } 1668 1669 ndp = TO_NCSI_DEV_PRIV(nd); 1670 1671 /* Add the VLAN id to our internal list */ 1672 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 1673 n_vids++; 1674 if (vlan->vid == vid) { 1675 netdev_dbg(dev, "NCSI: vid %u already registered\n", 1676 vid); 1677 return 0; 1678 } 1679 } 1680 if (n_vids >= NCSI_MAX_VLAN_VIDS) { 1681 netdev_warn(dev, 1682 "tried to add vlan id %u but NCSI max already registered (%u)\n", 1683 vid, NCSI_MAX_VLAN_VIDS); 1684 return -ENOSPC; 1685 } 1686 1687 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 1688 if (!vlan) 1689 return -ENOMEM; 1690 1691 vlan->proto = proto; 1692 vlan->vid = vid; 1693 list_add_rcu(&vlan->list, &ndp->vlan_vids); 1694 1695 netdev_dbg(dev, "NCSI: Added new vid %u\n", vid); 1696 1697 found = ncsi_kick_channels(ndp) != 0; 1698 1699 return found ? 
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);

int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);

struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);
	ndp->package_whitelist = UINT_MAX;

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}
	ndp->channel_count = NCSI_RESERVED_CHANNEL;

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	pdev = to_platform_device(dev->dev.parent);
	if (pdev) {
		np = pdev->dev.of_node;
		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
			   of_property_read_bool(np, "mlx,multi-host")))
			ndp->mlx_multi_host = true;
	}

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		ndp->package_probe_id = 0;
		ndp->channel_probe_id = 0;
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	return ncsi_reset_dev(nd);
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor on any active channels. Don't reset the
	 * channel state so we know which were active when ncsi_start_dev()
	 * is next called.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			spin_unlock_irqrestore(&nc->lock, flags);

			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

int ncsi_reset_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_channel *nc, *active, *tmp;
	struct ncsi_package *np;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);

	if (!(ndp->flags & NCSI_DEV_RESET)) {
		/* Haven't been called yet, check states */
		switch (nd->state & ncsi_dev_state_major) {
		case ncsi_dev_state_registered:
		case ncsi_dev_state_probe:
			/* Not even probed yet - do nothing */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		case ncsi_dev_state_suspend:
		case ncsi_dev_state_config:
			/* Wait for the channel to finish its suspend/config
			 * operation; once it finishes it will check for
			 * NCSI_DEV_RESET and reset the state.
			 */
			ndp->flags |= NCSI_DEV_RESET;
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	} else {
		switch (nd->state) {
		case ncsi_dev_state_suspend_done:
		case ncsi_dev_state_config_done:
		case ncsi_dev_state_functional:
			/* Ok */
			break;
		default:
			/* Current reset operation happening */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	}

	if (!list_empty(&ndp->channel_queue)) {
		/* Clear any channel queue we may have interrupted */
		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
			list_del_init(&nc->link);
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	active = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (nc->state == NCSI_CHANNEL_ACTIVE) {
				active = nc;
				nc->state = NCSI_CHANNEL_INVISIBLE;
				spin_unlock_irqrestore(&nc->lock, flags);
				ncsi_stop_channel_monitor(nc);
				break;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
		if (active)
			break;
	}

	if (!active) {
		/* Done */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->flags &= ~NCSI_DEV_RESET;
		spin_unlock_irqrestore(&ndp->lock, flags);
		return ncsi_choose_active_channel(ndp);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	ndp->flags |= NCSI_DEV_RESET;
	ndp->active_channel = active;
	ndp->active_package = active->package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	nd->state = ncsi_dev_state_suspend;
	schedule_work(&ndp->work);
	return 0;
}

void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	disable_work_sync(&ndp->work);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);