/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2011 Emulex. All rights reserved.
 * Use is subject to license terms.
 */


#include <emlxs.h>

/* Timer period in seconds */
#define EMLXS_TIMER_PERIOD 1	/* secs */
#define EMLXS_PKT_PERIOD 5	/* secs */
#define EMLXS_UB_PERIOD 60	/* secs */

EMLXS_MSG_DEF(EMLXS_CLOCK_C);


static void emlxs_timer_check_loopback(emlxs_hba_t *hba);

#ifdef DHCHAP_SUPPORT
static void emlxs_timer_check_dhchap(emlxs_port_t *port);
#endif /* DHCHAP_SUPPORT */

static void emlxs_timer(void *arg);
static void emlxs_timer_check_fw_update(emlxs_hba_t *hba);
static void emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
static uint32_t emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
static void emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
static void emlxs_timer_check_linkup(emlxs_hba_t *hba);
static void emlxs_timer_check_discovery(emlxs_port_t *port);
static void emlxs_timer_check_ub(emlxs_port_t *port);
static void emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
static uint32_t emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
    Q *abortq, uint8_t *flag);

#ifdef TX_WATCHDOG
static void emlxs_tx_watchdog(emlxs_hba_t *hba);
#endif /* TX_WATCHDOG */

/*
 * Convert a timeout in seconds into an absolute lbolt value suitable for
 * timed waits.  Returns -1 (wait forever) when the CFG_TIMEOUT_ENABLE
 * config parameter is off.
 */
extern clock_t
emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
{
    emlxs_config_t *cfg = &CFG;
    clock_t time;

    /* Set thread timeout */
    if (cfg[CFG_TIMEOUT_ENABLE].current) {
        /* Current lbolt plus 'timeout' seconds worth of ticks */
        (void) drv_getparm(LBOLT, &time);
        time += (timeout * drv_usectohz(1000000));
    } else {
        time = -1;
    }

    return (time);

} /* emlxs_timeout() */


/*
 * Periodic watchdog entry point, driven by timeout(9F).  Serializes itself
 * via EMLXS_TIMER_BUSY, honors kill requests (EMLXS_TIMER_KILL), updates
 * timer_tics, runs emlxs_timer_checks(), and reschedules itself every
 * EMLXS_TIMER_PERIOD seconds until stopped.
 */
static void
emlxs_timer(void *arg)
{
    emlxs_hba_t *hba = (emlxs_hba_t *)arg;

    if (!hba->timer_id) {
        return;
    }

    mutex_enter(&EMLXS_TIMER_LOCK);

    /* Only one timer thread is allowed */
    if (hba->timer_flags & EMLXS_TIMER_BUSY) {
        mutex_exit(&EMLXS_TIMER_LOCK);
        return;
    }

    /* Check if a kill request has been made */
    if (hba->timer_flags & EMLXS_TIMER_KILL) {
        hba->timer_id = 0;
        hba->timer_flags |= EMLXS_TIMER_ENDED;

        mutex_exit(&EMLXS_TIMER_LOCK);
        return;
    }

    hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
    hba->timer_tics = DRV_TIME;

    mutex_exit(&EMLXS_TIMER_LOCK);

    /* Poll for error attention outside the timer lock */
    EMLXS_SLI_POLL_ERRATT(hba);

    /* Perform standard checks */
    emlxs_timer_checks(hba);

    /* Restart the timer */
    mutex_enter(&EMLXS_TIMER_LOCK);

    hba->timer_flags &= ~EMLXS_TIMER_BUSY;

    /* If timer is still enabled, restart it */
    if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
        hba->timer_id =
            timeout(emlxs_timer, (void *)hba,
            (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
    } else {
        hba->timer_id = 0;
        hba->timer_flags |= EMLXS_TIMER_ENDED;
    }

    mutex_exit(&EMLXS_TIMER_LOCK);

    return;

} /* emlxs_timer() */


/*
 * Run every per-tick check: SLI timers, events, heartbeat, firmware-update
 * notice, loopback, packet timeouts, linkup, then per-port node/discovery/
 * UB/DHCHAP checks, and finally channel servicing.  Bails out early if a
 * packet-timeout check decided to reset the link or adapter.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    uint8_t flag[MAX_CHANNEL];
    uint32_t i;
    uint32_t rc;

    /* Exit if we are still
    initializing */
    if (hba->state < FC_LINK_DOWN) {
        return;
    }

    bzero((void *)flag, sizeof (flag));

    /* Check SLI level timeouts */
    EMLXS_SLI_TIMER(hba);

    /* Check event queue */
    emlxs_timer_check_events(hba);

    /* Check heartbeat timer */
    emlxs_timer_check_heartbeat(hba);

    /* Check fw update timer */
    emlxs_timer_check_fw_update(hba);

#ifdef IDLE_TIMER
    emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

    /* Check for loopback timeouts */
    emlxs_timer_check_loopback(hba);

    /* Check for packet timeouts */
    rc = emlxs_timer_check_pkts(hba, flag);

    if (rc) {
        /* Link or adapter is being reset */
        return;
    }

    /* Check for linkup timeout */
    emlxs_timer_check_linkup(hba);

    /* Check the ports */
    for (i = 0; i < MAX_VPORTS; i++) {
        port = &VPORT(i);

        if (!(port->flag & EMLXS_PORT_BOUND)) {
            continue;
        }

        /* Check for node gate timeouts */
        emlxs_timer_check_nodes(port, flag);

        /* Check for tape discovery timeout */
        emlxs_timer_check_discovery(port);

        /* Check for UB timeouts */
        emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
        /* Check for DHCHAP authentication timeouts */
        emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

    }

    /* Check for IO channel service timeouts */
    /* Always do this last */
    emlxs_timer_check_channels(hba, flag);

    return;

} /* emlxs_timer_checks() */


/*
 * Arm the periodic watchdog if it is not already running.  The first tick
 * fires one second from now; the unlocked pre-check is just a fast path,
 * the locked re-check is authoritative.
 */
extern void
emlxs_timer_start(emlxs_hba_t *hba)
{
    if (hba->timer_id) {
        return;
    }

    /* Restart the timer */
    mutex_enter(&EMLXS_TIMER_LOCK);
    if (!hba->timer_id) {
        hba->timer_flags = 0;
        hba->timer_id =
            timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
    }
    mutex_exit(&EMLXS_TIMER_LOCK);

} /* emlxs_timer_start() */


/*
 * Stop the periodic watchdog.  Sets EMLXS_TIMER_KILL and then polls
 * (dropping the lock and sleeping 500ms between checks) until the timer
 * callback has observed the kill flag and cleared timer_id.
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
    if
    (!hba->timer_id) {
        return;
    }

    mutex_enter(&EMLXS_TIMER_LOCK);
    hba->timer_flags |= EMLXS_TIMER_KILL;

    /* Wait for the timer callback to acknowledge the kill request */
    while (hba->timer_id) {
        mutex_exit(&EMLXS_TIMER_LOCK);
        delay(drv_usectohz(500000));
        mutex_enter(&EMLXS_TIMER_LOCK);
    }
    mutex_exit(&EMLXS_TIMER_LOCK);

    return;

} /* emlxs_timer_stop() */


/*
 * Scan for timed-out packets.  Runs every EMLXS_PKT_PERIOD seconds:
 *  1. Under EMLXS_TX_CHANNEL_LOCK, dequeue expired iocbs from every node's
 *     priority and normal tx queues onto a local 'tmo' queue.
 *  2. Still under the lock, release their iotags/XRIs and mark them
 *     PACKET_IN_TIMEOUT.
 *  3. Outside the locks, complete them with LOCAL_REJECT status.
 *  4. Under EMLXS_FCTAB_LOCK, check IOs outstanding on the chip and let
 *     emlxs_pkt_chip_timeout() escalate per-packet (abort/close/reset).
 * Returns nonzero (1=link reset, 2=adapter reset spawned) if the caller
 * should stop further timer processing this tick.
 */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg = &CFG;
    Q tmo;
    int32_t channelno;
    CHANNEL *cp;
    NODELIST *nlp;
    IOCBQ *prev;
    IOCBQ *next;
    IOCB *iocb;
    IOCBQ *iocbq;
    emlxs_buf_t *sbp;
    fc_packet_t *pkt;
    Q abort;
    uint32_t iotag;
    uint32_t rc;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return (0);
    }

    if (hba->pkt_timer > hba->timer_tics) {
        return (0);
    }

    hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


    bzero((void *)&tmo, sizeof (Q));

    /*
     * We must hold the locks here because we never know when an iocb
     * will be removed out from under us
     */

    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        /* Scan the tx queues for each active node on the channel */

        /* Get the first node */
        nlp = (NODELIST *)cp->nodeq.q_first;

        while (nlp) {
            /* Scan the node's priority tx queue */
            prev = NULL;
            iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

            while (iocbq) {
                next = (IOCBQ *)iocbq->next;
                iocb = &iocbq->iocb;
                sbp = (emlxs_buf_t *)iocbq->sbp;

                /* Check if iocb has timed out */
                if (sbp && hba->timer_tics >= sbp->ticks) {
                    /* iocb timed out, now deque it */
                    if (next == NULL) {
                        nlp->nlp_ptx[channelno].q_last =
                            (uint8_t *)prev;
                    }

                    if (prev == NULL) {
                        nlp->nlp_ptx[channelno].
                            q_first = (uint8_t *)next;
                    } else {
                        prev->next = next;
                    }

                    iocbq->next = NULL;
                    nlp->nlp_ptx[channelno].q_cnt--;

                    /* Add this iocb to our local */
                    /* timout queue */

                    /*
                     * This way we don't hold the TX_CHANNEL
                     * lock too long
                     */

                    if (tmo.q_first) {
                        ((IOCBQ *)tmo.q_last)->next =
                            iocbq;
                        tmo.q_last =
                            (uint8_t *)iocbq;
                        tmo.q_cnt++;
                    } else {
                        tmo.q_first =
                            (uint8_t *)iocbq;
                        tmo.q_last =
                            (uint8_t *)iocbq;
                        tmo.q_cnt = 1;
                    }
                    iocbq->next = NULL;

                } else {
                    prev = iocbq;
                }

                iocbq = next;

            } /* while (iocbq) */


            /* Scan the node's tx queue */
            prev = NULL;
            iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

            while (iocbq) {
                next = (IOCBQ *)iocbq->next;
                iocb = &iocbq->iocb;
                sbp = (emlxs_buf_t *)iocbq->sbp;

                /* Check if iocb has timed out */
                if (sbp && hba->timer_tics >= sbp->ticks) {
                    /* iocb timed out, now deque it */
                    if (next == NULL) {
                        nlp->nlp_tx[channelno].q_last =
                            (uint8_t *)prev;
                    }

                    if (prev == NULL) {
                        nlp->nlp_tx[channelno].q_first =
                            (uint8_t *)next;
                    } else {
                        prev->next = next;
                    }

                    iocbq->next = NULL;
                    nlp->nlp_tx[channelno].q_cnt--;

                    /* Add this iocb to our local */
                    /* timout queue */

                    /*
                     * This way we don't hold the TX_CHANNEL
                     * lock too long
                     */

                    if (tmo.q_first) {
                        ((IOCBQ *)tmo.q_last)->next =
                            iocbq;
                        tmo.q_last =
                            (uint8_t *)iocbq;
                        tmo.q_cnt++;
                    } else {
                        tmo.q_first =
                            (uint8_t *)iocbq;
                        tmo.q_last =
                            (uint8_t *)iocbq;
                        tmo.q_cnt = 1;
                    }
                    iocbq->next = NULL;

                } else {
                    prev = iocbq;
                }

                iocbq = next;

            } /* while (iocbq) */

            if (nlp == (NODELIST *)cp->nodeq.q_last) {
                nlp = NULL;
            } else {
                nlp = nlp->nlp_next[channelno];
            }

        } /* while (nlp) */

    } /* end of for */

    /* Now cleanup the iocb's */
    iocbq = (IOCBQ *)tmo.q_first;
    while (iocbq) {
        /* Free the IoTag and the bmp */
        iocb = &iocbq->iocb;
        channelno = ((CHANNEL *)iocbq->channel)->channelno;
        sbp = iocbq->sbp;
        if (sbp && (sbp != STALE_PACKET)) {
            if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
                emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
            } else {
                (void) emlxs_unregister_pkt(
                    (CHANNEL *)iocbq->channel,
                    iocb->ULPIOTAG, 0);
            }

            mutex_enter(&sbp->mtx);
            sbp->pkt_flags |= PACKET_IN_TIMEOUT;
            mutex_exit(&sbp->mtx);
        }

        iocbq = (IOCBQ *)iocbq->next;

    } /* end of while */

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    /* Now complete the transmit timeouts outside the locks */
    iocbq = (IOCBQ *)tmo.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */
        iocbq->next = NULL;

        /* Get the pkt */
        sbp = (emlxs_buf_t *)iocbq->sbp;

        if (sbp) {
            /* Warning: Some FCT sbp's don't have */
            /* fc_packet objects */
            pkt = PRIV2PKT(sbp);

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
                "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
                sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

            /* Completion status depends on link state */
            if (hba->state >= FC_LINK_UP) {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_ABORT_TIMEOUT, 1);
            } else {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }

        }

        iocbq = next;

    } /* end of while */



    /* Now check the chip */
    bzero((void *)&abort, sizeof (Q));

    /* Check the HBA for outstanding IOs */
    rc = 0;
    mutex_enter(&EMLXS_FCTAB_LOCK);
    for (iotag = 1; iotag < hba->max_iotag; iotag++) {
        sbp = hba->fc_table[iotag];

        if (!sbp || (sbp == STALE_PACKET)) {
            continue;
        }

        /* Check if IO is valid */
        if (!(sbp->pkt_flags & PACKET_VALID) ||
            (sbp->pkt_flags & (PACKET_ULP_OWNED|
            PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
                "timer_check_pkts: Invalid IO found. iotag=%x",
                iotag);

            hba->fc_table[iotag] = STALE_PACKET;
            hba->io_count--;
            continue;
        }

        if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
            (hba->timer_tics >= sbp->ticks)) {
            rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
                sbp, &abort, flag);

            /* Nonzero rc means a reset is needed; stop scanning */
            if (rc) {
                break;
            }
        }
    }
    mutex_exit(&EMLXS_FCTAB_LOCK);

    /* Now put the iocb's on the tx queue */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */
        iocbq->next = NULL;

        /* Send this iocbq */
        emlxs_tx_put(iocbq, 1);

        iocbq = next;
    }

    /* Now trigger IO channel service to send these abort iocbq */
    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        if (!flag[channelno]) {
            continue;
        }
        cp = &hba->chan[channelno];

        EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
    }

    if (rc == 1) {
        /* Spawn a thread to reset the link */
        emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
    } else if (rc == 2) {
        /* Spawn a thread to reset the adapter */
        emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
    }

    return (rc);

} /* emlxs_timer_check_pkts() */


/*
 * Per-channel service watchdog.  If a channel's timeout has expired and it
 * still has queued work, flag it for servicing (and log when the link is
 * up); the timeout is re-armed 10 tics out.  Flagged channels get an
 * EMLXS_SLI_ISSUE_IOCB_CMD kick to drain anything still queued.
 */
static void
emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg = &CFG;
    int32_t channelno;
    CHANNEL *cp;
    uint32_t logit;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return;
    }

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        logit = 0;

        /* Check for channel timeout now */
        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
        if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
            /* Check if there is work to do on channel and */
            /* the link is still up */
            if (cp->nodeq.q_first) {
                flag[channelno] = 1;
                cp->timeout = hba->timer_tics + 10;

                if (hba->state >= FC_LINK_UP) {
                    logit = 1;
                }
            } else {
                cp->timeout = 0;
            }
        }
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

        if (logit) {
            EMLXS_MSGF(EMLXS_CONTEXT,
                &emlxs_chan_watchdog_msg,
                "IO Channel %d cnt=%d,%d",
                channelno,
                hba->channel_tx_count,
                hba->io_count);
        }

        /*
         * If IO channel flag is set, request iocb servicing
         * here to send any iocb's that may still be queued
         */
        if (flag[channelno]) {
            EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
        }
    }

    return;

} /* emlxs_timer_check_channels() */


/*
 * Scan the port's node hash table for expired per-channel node timers and
 * force-RSCN timers.  For each expired node timer, the scan restarts after
 * calling emlxs_node_timeout() outside the node_rwlock (the lock cannot be
 * held across node_open).
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
    emlxs_hba_t *hba = HBA;
    uint32_t found;
    uint32_t i;
    NODELIST *nlp;
    int32_t channelno;

    for (;;) {
        /* Check node gate flag for expiration */
        found = 0;

        /*
         * We need to lock, scan, and unlock because we can't hold the
         * lock while we call node_open
         */
        rw_enter(&port->node_rwlock, RW_READER);
        for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
            nlp =
                port->node_table[i];
            while (nlp != NULL) {
                for (channelno = 0;
                    channelno < hba->chan_count;
                    channelno++) {
                    /* Check if the node timer is active */
                    /* and if timer has expired */
                    if (nlp->nlp_tics[channelno] &&
                        (hba->timer_tics >=
                        nlp->nlp_tics[channelno])) {
                        /* If so, set the flag and */
                        /* break out */
                        found = 1;
                        flag[channelno] = 1;
                        break;
                    }
                }

                if (nlp->nlp_force_rscn &&
                    (hba->timer_tics >= nlp->nlp_force_rscn)) {
                    nlp->nlp_force_rscn = 0;
                    /*
                     * Generate an RSCN to
                     * wakeup ULP
                     */
                    (void) emlxs_generate_rscn(port,
                        nlp->nlp_DID);
                }

                if (found) {
                    break;
                }

                nlp = nlp->nlp_list_next;
            }

            if (found) {
                break;
            }

        }
        rw_exit(&port->node_rwlock);

        if (!found) {
            break;
        }

        /* nlp/channelno identify the expired node timer found above */
        emlxs_node_timeout(port, nlp, channelno);
    }

} /* emlxs_timer_check_nodes() */


/*
 * If the loopback timer has expired while the adapter is still in
 * FC_LOOPBACK_MODE, reset the link to exit loopback.
 */
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg = &CFG;
    int32_t reset = 0;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return;
    }

    /* Check the loopback timer for expiration */
    mutex_enter(&EMLXS_PORT_LOCK);

    if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
        mutex_exit(&EMLXS_PORT_LOCK);
        return;
    }

    hba->loopback_tics = 0;

    if (hba->flag & FC_LOOPBACK_MODE) {
        reset = 1;
    }

    mutex_exit(&EMLXS_PORT_LOCK);

    /* Issue the reset outside EMLXS_PORT_LOCK */
    if (reset) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
            "LOOPBACK_MODE: Expired. Resetting...");
        (void) emlxs_reset(port, FC_FCA_LINK_RESET);
    }

    return;

} /* emlxs_timer_check_loopback() */


/*
 * If the linkup timer has expired and the link is still up, make the
 * linkup callback (emlxs_port_online).  On SLI4-class adapters the
 * callback is deferred while mailbox commands are still queued.
 */
static void
emlxs_timer_check_linkup(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    uint32_t linkup;

    /* Check if all mbox commands from previous activity are processed */
    if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
        mutex_enter(&EMLXS_MBOX_LOCK);
        if (hba->mbox_queue.q_first) {
            mutex_exit(&EMLXS_MBOX_LOCK);
            return;
        }
        mutex_exit(&EMLXS_MBOX_LOCK);
    }

    /* Check the linkup timer for expiration */
    mutex_enter(&EMLXS_PORT_LOCK);
    linkup = 0;
    if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
        hba->linkup_timer = 0;

        /* Make sure link is still ready */
        if (hba->state >= FC_LINK_UP) {
            linkup = 1;
        }
    }
    mutex_exit(&EMLXS_PORT_LOCK);

    /* Make the linkup callback */
    if (linkup) {
        emlxs_port_online(port);
    }
    return;

} /* emlxs_timer_check_linkup() */


/*
 * Adapter heartbeat.  Every 5 tics: if no interrupts were seen since the
 * last check and no mailbox activity is pending, issue a HEARTBEAT mailbox
 * command so the mailbox timeout machinery can detect a dead adapter.
 */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    MAILBOXQ *mbq;
    emlxs_config_t *cfg = &CFG;
    int rc;

    if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
        return;
    }

    if (hba->timer_tics < hba->heartbeat_timer) {
        return;
    }

    hba->heartbeat_timer = hba->timer_tics + 5;

    /* Return if adapter interrupts have occurred */
    if (hba->heartbeat_flag) {
        hba->heartbeat_flag = 0;
        return;
    }
    /* No adapter interrupts have occured for 5 seconds now */

    /* Return if mailbox is busy */
    /* This means the mailbox timer routine is watching for problems */
    if (hba->mbox_timer) {
        return;
    }

    /* Return if heartbeat is still outstanding */
    if (hba->heartbeat_active) {
        return;
    }

    if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
        EMLXS_MSGF(EMLXS_CONTEXT,
            &emlxs_init_debug_msg,
            "Unable to allocate heartbeat mailbox.");
        return;
    }

    emlxs_mb_heartbeat(hba, mbq);
    hba->heartbeat_active = 1;

    rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
    /* Only free the mailbox if it was neither queued nor accepted */
    if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
    }

    return;

} /* emlxs_timer_check_heartbeat() */


/*
 * While a firmware update is pending (FW_UPDATE_NEEDED), periodically log
 * a message telling the administrator a manual reset is required; the
 * reminder repeats every 24 hours.
 */
static void
emlxs_timer_check_fw_update(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;

    if (!(hba->fw_flag & FW_UPDATE_NEEDED)) {
        hba->fw_timer = 0;
        return;
    }

    if (hba->timer_tics < hba->fw_timer) {
        return;
    }

    /* The suggested tool differs for target vs. initiator mode */
    if (hba->tgt_mode) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fw_update_msg,
            "A manual HBA reset or link reset (using emlxadm) "
            "is required.");
    } else {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fw_update_msg,
            "A manual HBA reset or link reset (using luxadm or fcadm) "
            "is required.");
    }

    /* Set timer for 24 hours */
    hba->fw_timer = hba->timer_tics + (60 * 60 * 24);

    return;

} /* emlxs_timer_check_fw_update() */


/*
 * Discovery (tape) timeout handling.  When the discovery timer expires
 * with the link still up, flush any FCP-2 nodes that remain closed, then
 * issue a CLEAR_LA mailbox command (SLI3 and earlier only) to finish link
 * bring-up.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
    emlxs_hba_t *hba = HBA;
    emlxs_config_t *cfg = &CFG;
    int32_t send_clear_la;
    uint32_t found;
    uint32_t i;
    NODELIST *nlp;
    MAILBOXQ *mbox;
    int rc;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return;
    }

    /* Check the discovery timer for expiration */
    send_clear_la = 0;
    mutex_enter(&EMLXS_PORT_LOCK);
    while (hba->discovery_timer &&
        (hba->timer_tics >= hba->discovery_timer) &&
        (hba->state == FC_LINK_UP)) {
        send_clear_la = 1;

        /* Perform a flush on fcp2 nodes that are still closed */
        found = 0;
        rw_enter(&port->node_rwlock, RW_READER);
        for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
            nlp = port->node_table[i];
            while (nlp != NULL) {
                if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
                    (nlp->nlp_flag[hba->channel_fcp] &
                    NLP_CLOSED)) {
                    found = 1;
                    break;

                }
                nlp = nlp->nlp_list_next;
            }

            if (found) {
                break;
            }
        }
        rw_exit(&port->node_rwlock);

        if (!found) {
            break;
        }

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
            "FCP2 device (did=%06x) missing. Flushing...",
            nlp->nlp_DID);

        /* Drop EMLXS_PORT_LOCK across the unreg, then re-check */
        mutex_exit(&EMLXS_PORT_LOCK);

        (void) emlxs_mb_unreg_node(port, nlp, NULL, NULL, NULL);

        mutex_enter(&EMLXS_PORT_LOCK);

    }
    mutex_exit(&EMLXS_PORT_LOCK);

    /* Try to send clear link attention, if needed */
    if ((hba->sli_mode < EMLXS_HBA_SLI4_MODE) && (send_clear_la == 1) &&
        (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
        mutex_enter(&EMLXS_PORT_LOCK);

        /*
         * If state is not FC_LINK_UP, then either the link has gone
         * down or a FC_CLEAR_LA has already been issued
         */
        if (hba->state != FC_LINK_UP) {
            mutex_exit(&EMLXS_PORT_LOCK);
            emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
        } else {
            /* Change state and clear discovery timer */
            EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

            hba->discovery_timer = 0;

            mutex_exit(&EMLXS_PORT_LOCK);

            /* Prepare and send the CLEAR_LA command */
            emlxs_mb_clear_la(hba, mbox);

            rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
            if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
            }
        }
    }

    return;

} /* emlxs_timer_check_discovery() */


/*
 * Unsolicited-buffer watchdog.  Every EMLXS_UB_PERIOD seconds, scan all
 * in-use unsolicited buffers; ones held longer than their timeout are
 * flagged EMLXS_UB_TIMEOUT, logged, and given a progressively longer
 * timeout (10 min, 30 min, then doubling).
 */
static void
emlxs_timer_check_ub(emlxs_port_t *port)
{
    emlxs_hba_t *hba = HBA;
    emlxs_unsol_buf_t *ulistp;
    fc_unsol_buf_t *ubp;
    emlxs_ub_priv_t *ub_priv;
    uint32_t i;

    if (port->ub_timer > hba->timer_tics) {
        return;
    }

    port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;

    /* Check the unsolicited buffers */
    mutex_enter(&EMLXS_UB_LOCK);

    ulistp = port->ub_pool;
    while
        (ulistp) {
        /* Check buffers in this pool */
        for (i = 0; i < ulistp->pool_nentries; i++) {
            ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
            ub_priv = ubp->ub_fca_private;

            if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
                continue;
            }

            /* If buffer has timed out, print message and */
            /* increase timeout */
            if ((ub_priv->time + ub_priv->timeout) <=
                hba->timer_tics) {
                ub_priv->flags |= EMLXS_UB_TIMEOUT;

                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_sfs_debug_msg,
                    "Stale UB buffer detected (%d mins): "
                    "buffer=%p (%x,%x,%x,%x)",
                    (ub_priv->timeout / 60), ubp,
                    ubp->ub_frame.type, ubp->ub_frame.s_id,
                    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);

                /* Increase timeout period */

                /* If timeout was 5 mins or less, */
                /* increase it to 10 mins */
                if (ub_priv->timeout <= (5 * 60)) {
                    ub_priv->timeout = (10 * 60);
                }
                /* If timeout was 10 mins or less, */
                /* increase it to 30 mins */
                else if (ub_priv->timeout <= (10 * 60)) {
                    ub_priv->timeout = (30 * 60);
                }
                /* Otherwise double it. */
                else {
                    ub_priv->timeout *= 2;
                }
            }
        }

        ulistp = ulistp->pool_next;
    }

    mutex_exit(&EMLXS_UB_LOCK);

    return;

} /* emlxs_timer_check_ub() */


/*
 * Escalating recovery for a packet stuck on the chip.  Each call bumps
 * sbp->abort_attempts and takes the next step:
 *   attempt 0: send ABTS abort (link up) or close XRI (link down)
 *   attempt 1: close XRI
 *   attempt 2: request link reset  (return 1)
 *   attempt 3+: request adapter reset (return 2)
 * Any abort/close iocb built here is appended to 'abortq' for the caller
 * to transmit, and the packet's channel is flagged for servicing.
 */
/* EMLXS_FCTAB_LOCK must be held to call this */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
    emlxs_hba_t *hba = HBA;
    CHANNEL *cp = (CHANNEL *)sbp->channel;
    IOCBQ *iocbq = NULL;
    fc_packet_t *pkt;
    uint32_t rc = 0;

    mutex_enter(&sbp->mtx);

    /* Warning: Some FCT sbp's don't have fc_packet objects */
    pkt = PRIV2PKT(sbp);

    switch (sbp->abort_attempts) {
    case 0:

        /* Create the abort IOCB */
        if (hba->state >= FC_LINK_UP) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
                "chipQ: 1:Aborting. sbp=%p iotag=%x tmo=%d "
                "flags=%x",
                sbp, sbp->iotag,
                (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

            iocbq =
                emlxs_create_abort_xri_cn(port, sbp->node,
                sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

            /* The adapter will make 2 attempts to send ABTS */
            /* with 2*ratov timeout each time */
            sbp->ticks =
                hba->timer_tics + (4 * hba->fc_ratov) + 10;
        } else {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
                "chipQ: 1:Closing. sbp=%p iotag=%x tmo=%d "
                "flags=%x",
                sbp, sbp->iotag,
                (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

            iocbq =
                emlxs_create_close_xri_cn(port, sbp->node,
                sbp->iotag, cp);

            sbp->ticks = hba->timer_tics + 30;
        }

        /* set the flags */
        sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

        flag[cp->channelno] = 1;
        rc = 0;

        break;

    case 1:

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
            "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

        iocbq =
            emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
            cp);

        sbp->ticks = hba->timer_tics + 30;

        flag[cp->channelno] = 1;
        rc = 0;

        break;

    case 2:

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
            "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
            sbp->iotag);

        sbp->ticks = hba->timer_tics + 60;
        rc = 1;

        break;

    default:

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
            "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
            sbp->abort_attempts, sbp, sbp->iotag);

        sbp->ticks = hba->timer_tics + 60;
        rc = 2;

        break;
    }

    sbp->abort_attempts++;
    mutex_exit(&sbp->mtx);

    /* Queue any abort/close iocb for the caller to transmit */
    if (iocbq) {
        if (abortq->q_first) {
            ((IOCBQ *)abortq->q_last)->next = iocbq;
            abortq->q_last = (uint8_t *)iocbq;
            abortq->q_cnt++;
        } else {
            abortq->q_first = (uint8_t *)iocbq;
            abortq->q_last = (uint8_t *)iocbq;
            abortq->q_cnt = 1;
        }
        iocbq->next = NULL;
    }

    return (rc);

} /* emlxs_pkt_chip_timeout() */


#ifdef TX_WATCHDOG

/*
 * Debug-only (TX_WATCHDOG) sanity check: verify that every fc_table entry
 * marked PACKET_IN_TXQ is actually present on its node's tx/ptx queue.
 * Entries missing twice in a row (PACKET_STALE already set) are re-queued
 * via emlxs_tx_put().
 */
static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    NODELIST *nlp;
    uint32_t channelno;
    CHANNEL *cp;
    IOCBQ *next;
    IOCBQ *iocbq;
    IOCB *iocb;
    uint32_t found;
    MATCHMAP *bmp;
    Q abort;
    uint32_t iotag;
    emlxs_buf_t *sbp;
    fc_packet_t *pkt = NULL;
    uint32_t cmd;
    uint32_t did;

    bzero((void *)&abort, sizeof (Q));

    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    mutex_enter(&EMLXS_FCTAB_LOCK);
    for (iotag = 1; iotag < hba->max_iotag; iotag++) {
        sbp = hba->fc_table[iotag];
        if (sbp && (sbp != STALE_PACKET) &&
            (sbp->pkt_flags & PACKET_IN_TXQ)) {
            nlp = sbp->node;
            iocbq = &sbp->iocbq;

            /*
             * NOTE(review): the cast below binds AFTER '->', so this
             * parses as (CHANNEL *)((sbp->channel)->channelno);
             * the intent appears to be
             * ((CHANNEL *)sbp->channel)->channelno.  This code is
             * only compiled under TX_WATCHDOG -- verify before
             * enabling that build flag.
             */
            channelno = (CHANNEL *)(sbp->channel)->channelno;
            if (iocbq->flag & IOCB_PRIORITY) {
                iocbq =
                    (IOCBQ *)nlp->nlp_ptx[channelno].
                    q_first;
            } else {
                iocbq =
                    (IOCBQ *)nlp->nlp_tx[channelno].
                    q_first;
            }

            /* Find a matching entry */
            found = 0;
            while (iocbq) {
                if (iocbq == &sbp->iocbq) {
                    found = 1;
                    break;
                }

                iocbq = (IOCBQ *)iocbq->next;
            }

            if (!found) {
                /* First miss: mark stale; second miss: re-queue */
                if (!(sbp->pkt_flags & PACKET_STALE)) {
                    mutex_enter(&sbp->mtx);
                    sbp->pkt_flags |=
                        PACKET_STALE;
                    mutex_exit(&sbp->mtx);
                } else {
                    if (abort.q_first == 0) {
                        abort.q_first =
                            &sbp->iocbq;
                    } else {
                        ((IOCBQ *)abort.
                            q_last)->next =
                            &sbp->iocbq;
                    }

                    abort.q_last = &sbp->iocbq;
                    abort.q_cnt++;
                }

            } else {
                /* Entry is present again; clear the stale flag */
                if ((sbp->pkt_flags & PACKET_STALE)) {
                    mutex_enter(&sbp->mtx);
                    sbp->pkt_flags &=
                        ~PACKET_STALE;
                    mutex_exit(&sbp->mtx);
                }
            }
        }
    }
    mutex_exit(&EMLXS_FCTAB_LOCK);

    /* Re-queue every packet that went missing from its tx queue */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        next = (IOCBQ *)iocbq->next;
        iocbq->next = NULL;
        sbp = (emlxs_buf_t *)iocbq->sbp;

        pkt = PRIV2PKT(sbp);
        if (pkt) {
            did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
            cmd = *((uint32_t *)pkt->pkt_cmd);
            cmd = LE_SWAP32(cmd);
        }


        emlxs_tx_put(iocbq, 0);

        iocbq = next;

    } /* end of while */

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    return;

} /* emlxs_tx_watchdog() */

#endif /* TX_WATCHDOG */


#ifdef DHCHAP_SUPPORT

/*
 * Check DHCHAP authentication timers.  For each node, fire the authrsp
 * and/or reauth timeout handlers once their deadlines pass.
 *
 * NOTE(review): only the head node of each hash chain is examined here
 * (there is no nlp_list_next walk, unlike the other node scans in this
 * file) -- confirm whether chained nodes are intentionally skipped.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
    emlxs_hba_t *hba = HBA;
    uint32_t i;
    NODELIST *ndlp = NULL;

    for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
        ndlp = port->node_table[i];

        if (!ndlp) {
            continue;
        }

        /* Check authentication response timeout */
        if (ndlp->node_dhc.nlp_authrsp_tmo &&
            (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
            /* Trigger authresp timeout handler */
            (void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
        }

        /* Check reauthentication timeout */
        if (ndlp->node_dhc.nlp_reauth_tmo &&
            (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
            /* Trigger reauth timeout handler */
            emlxs_dhc_reauth_timeout(port, NULL, ndlp);
        }
    }
    return;

} /* emlxs_timer_check_dhchap */

#endif /* DHCHAP_SUPPORT */