1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Emulex. All rights reserved. 24 * Use is subject to License terms. 25 */ 26 27 #include <emlxs.h> 28 29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 30 EMLXS_MSG_DEF(EMLXS_FCP_C); 31 32 #define EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \ 33 getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow)); 34 35 static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, 36 Q *abort, uint8_t *flag, emlxs_buf_t *fpkt); 37 static uint32_t emlxs_iotag_flush(emlxs_hba_t *hba); 38 39 /* 40 * This routine copies data from src then potentially swaps the destination to 41 * big endian. Assumes cnt is a multiple of sizeof(uint32_t). 42 */ 43 extern void 44 emlxs_pcimem_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt) 45 { 46 uint32_t ldata; 47 int32_t i; 48 49 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 50 ldata = *src++; 51 ldata = PCIMEM_LONG(ldata); 52 *dest++ = ldata; 53 } 54 } /* emlxs_pcimem_bcopy */ 55 56 57 /* 58 * This routine copies data from src then swaps the destination to big endian. 59 * Assumes cnt is a multiple of sizeof(uint32_t). 
 */
extern void
emlxs_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
{
	uint32_t ldata;
	int32_t i;

	/* 'cnt' is a byte count; copy one 32-bit word per iteration */
	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src++;
		ldata = SWAP_DATA32(ldata);
		*dest++ = ldata;
	}
} /* End fc_swap_bcopy */


/* SCSI opcodes this file special-cases (CDB byte 0 values) */
#define SCSI3_PERSISTENT_RESERVE_IN	0x5e
#define SCSI_INQUIRY			0x12
#define SCSI_RX_DIAG			0x1C


/*
 * emlxs_handle_fcp_event
 *
 * Description: Process an FCP Rsp Ring completion
 *
 * Looks up the driver buffer (sbp) attached to the completed IOCB,
 * validates/repairs the FCP response where needed (task-abort
 * conversion, several known adapter underrun quirks), sets the
 * pkt residual fields, logs any error status, and finally completes
 * the packet via emlxs_pkt_complete() (or directly via pkt_comp on
 * the clean fast path).
 */
/* ARGSUSED */
extern void
emlxs_handle_fcp_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	IOCB *cmd;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
#ifdef SAN_DIAG_SUPPORT
	NODELIST *ndlp;
#endif
	uint32_t iostat;
	uint8_t localstat;
	fcp_rsp_t *rsp;
	uint32_t rsp_data_resid;
	uint32_t check_underrun;
	uint8_t asc;
	uint8_t ascq;
	uint8_t scsi_status;
	uint8_t sense;
	uint32_t did;
	uint32_t fix_it;
	uint8_t *scsi_cmd;
	uint8_t scsi_opcode;
	/*
	 * NOTE(review): scsi_dl is only assigned on the RSP-error
	 * underrun path below; the IOSTAT_DATA_UNDERRUN/OVERRUN log
	 * messages near the end may print it uninitialized when the
	 * adapter reports those statuses directly — confirm.
	 */
	uint16_t scsi_dl;
	uint32_t data_rx;

	cmd = &iocbq->iocb;

	/* Initialize the status */
	iostat = cmd->ulpStatus;
	localstat = 0;
	scsi_status = 0;
	asc = 0;
	ascq = 0;
	sense = 0;
	check_underrun = 0;
	fix_it = 0;

	HBASTATS.FcpEvent++;

	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (!sbp) {
		/* completion with missing xmit command */
		HBASTATS.FcpStray++;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
		    "cmd=%x iotag=%x", cmd->ulpCommand, cmd->ulpIoTag);

		return;
	}

	HBASTATS.FcpCompleted++;

#ifdef SAN_DIAG_SUPPORT
	emlxs_update_sd_bucket(sbp);
#endif /* SAN_DIAG_SUPPORT */

	pkt = PRIV2PKT(sbp);

	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
	/* presumably offset 12 is CDB byte 0 within the FCP_CMND IU — */
	/* verify against the fcp_cmd layout in the emlxs headers */
	scsi_opcode = scsi_cmd[12];
	data_rx = 0;

	/* Sync data in data buffer only on FC_PKT_FCP_READ */
	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
		emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
		    DDI_DMA_SYNC_FORKERNEL);

#ifdef TEST_SUPPORT
		/* Test hook: fake an adapter underrun on a good read */
		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
		    (pkt->pkt_datalen >= 512)) {
			hba->underrun_counter--;
			iostat = IOSTAT_FCP_RSP_ERROR;

			/* Report 512 bytes missing by adapter */
			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;

			/* Corrupt 512 bytes of Data buffer */
			bzero((uint8_t *)pkt->pkt_data, 512);

			/* Set FCP response to STATUS_GOOD */
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
#endif /* TEST_SUPPORT */
	}

	/* Process the pkt */
	mutex_enter(&sbp->mtx);

	/*
	 * Check for immediate return: clean success with a completion
	 * callback and no special state flags pending — complete the
	 * packet directly and skip all response/error processing.
	 */
	if ((iostat == IOSTAT_SUCCESS) &&
	    (pkt->pkt_comp) &&
	    !(sbp->pkt_flags &
	    (PACKET_RETURNED | PACKET_COMPLETED |
	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
	    PACKET_IN_ABORT | PACKET_POLLED))) {
		HBASTATS.FcpGood++;

		sbp->pkt_flags |=
		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
		    PACKET_COMPLETED | PACKET_RETURNED);
		mutex_exit(&sbp->mtx);

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
		emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

		(*pkt->pkt_comp) (pkt);

		return;
	}

	/*
	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
	 * is reported.
	 */

	/* Check if a response buffer was provided */
	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
		emlxs_mpdata_sync(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
		    DDI_DMA_SYNC_FORKERNEL);

		/* Get the response buffer pointer */
		rsp = (fcp_rsp_t *)pkt->pkt_resp;

		/* Set the valid response flag */
		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;

		scsi_status = rsp->fcp_u.fcp_status.scsi_status;

#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		if (scsi_status == SCSI_STAT_QUE_FULL) {
			emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
		} else if (scsi_status == SCSI_STAT_BUSY) {
			emlxs_log_sd_scsi_event(port,
			    SD_SCSI_SUBCATEGORY_DEVBSY,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
		}
#endif

		/*
		 * Convert a task abort to a check condition with no data
		 * transferred. We saw a data corruption when Solaris received
		 * a Task Abort from a tape.
		 */
		if (scsi_status == SCSI_STAT_TASK_ABORT) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Task Abort. "
			    "Fixed.did=0x%06x sbp=%p cmd=%02x dl=%d",
			    did, sbp, scsi_opcode, pkt->pkt_datalen);

			rsp->fcp_u.fcp_status.scsi_status =
			    SCSI_STAT_CHECK_COND;
			rsp->fcp_u.fcp_status.rsp_len_set = 0;
			rsp->fcp_u.fcp_status.sense_len_set = 0;
			rsp->fcp_u.fcp_status.resid_over = 0;

			if (pkt->pkt_datalen) {
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    SWAP_DATA32(pkt->pkt_datalen);
			} else {
				rsp->fcp_u.fcp_status.resid_under = 0;
				rsp->fcp_resid = 0;
			}

			scsi_status = SCSI_STAT_CHECK_COND;
		}

		/*
		 * We only need to check underrun if data could
		 * have been sent
		 */

		/* Always check underrun if status is good */
		if (scsi_status == SCSI_STAT_GOOD) {
			check_underrun = 1;
		}
		/* Check the sense codes if this is a check condition */
		else if (scsi_status == SCSI_STAT_CHECK_COND) {
			check_underrun = 1;

			/*
			 * Check if sense data was provided.
			 * Offsets: sense data starts at byte 32 of the
			 * FCP_RSP IU (sense key at +2, ASC at +12,
			 * ASCQ at +13) — assumes no RSP_INFO precedes
			 * the sense bytes; confirm against FCP spec.
			 */
			if (SWAP_DATA32(rsp->fcp_sense_len) >= 14) {
				sense = *((uint8_t *)rsp + 32 + 2);
				asc = *((uint8_t *)rsp + 32 + 12);
				ascq = *((uint8_t *)rsp + 32 + 13);
			}

#ifdef SAN_DIAG_SUPPORT
			emlxs_log_sd_scsi_check_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, sense, asc, ascq);
#endif
		}
		/* Status is not good and this is not a check condition */
		/* No data should have been sent */
		else {
			check_underrun = 0;
		}

		/* Get the residual underrun count reported by the SCSI reply */
		rsp_data_resid = (pkt->pkt_datalen &&
		    rsp->fcp_u.fcp_status.resid_under) ?
		    SWAP_DATA32(rsp->fcp_resid) : 0;

		/* Set the pkt resp_resid field */
		pkt->pkt_resp_resid = 0;

		/* Set the pkt data_resid field */
		if (pkt->pkt_datalen &&
		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
			/*
			 * Get the residual underrun count reported by
			 * our adapter
			 */
			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;

#ifdef SAN_DIAG_SUPPORT
			if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
				emlxs_log_sd_fc_rdchk_event(port,
				    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
				    scsi_opcode, pkt->pkt_data_resid);
			}
#endif

			/* Get the actual amount of data transferred */
			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

			/*
			 * If the residual being reported by the adapter is
			 * greater than the residual being reported in the
			 * reply, then we have a true underrun.
			 */
			if (check_underrun &&
			    (pkt->pkt_data_resid > rsp_data_resid)) {
				/* Derive the CDB-requested length */
				switch (scsi_opcode) {
				case SCSI_INQUIRY:
					scsi_dl = scsi_cmd[16];
					break;

				case SCSI_RX_DIAG:
					scsi_dl =
					    (scsi_cmd[15] * 0x100) +
					    scsi_cmd[16];
					break;

				default:
					scsi_dl = pkt->pkt_datalen;
				}

#ifdef FCP_UNDERRUN_PATCH1
				/*
				 * If status is not good and no data was
				 * actually transferred, then we must fix
				 * the issue
				 */
				if ((scsi_status != SCSI_STAT_GOOD) &&
				    (data_rx == 0)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_fcp_completion_error_msg,
					    "Underrun(1). Fixed. "
					    "did=0x%06x sbp=%p cmd=%02x "
					    "dl=%d,%d rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    (pkt->pkt_datalen -
					    cmd->un.fcpi.fcpi_parm),
					    rsp_data_resid);

				}
#endif /* FCP_UNDERRUN_PATCH1 */


#ifdef FCP_UNDERRUN_PATCH2
				if ((scsi_status == SCSI_STAT_GOOD)) {
					emlxs_msg_t *msg;

					msg = &emlxs_fcp_completion_error_msg;
					/*
					 * If status is good and this is an
					 * inquiry request and the amount of
					 * data
					 */
					/*
					 * requested <= data received, then we
					 * must fix the issue.
					 */

					if ((scsi_opcode == SCSI_INQUIRY) &&
					    (pkt->pkt_datalen >= data_rx) &&
					    (scsi_dl <= data_rx)) {
						fix_it = 1;

						EMLXS_MSGF(EMLXS_CONTEXT, msg,
						    "Underrun(2). Fixed. "
						    "did=0x%06x sbp=%p "
						    "cmd=%02x dl=%d,%d "
						    "rx=%d rsp=%d",
						    did, sbp, scsi_opcode,
						    pkt->pkt_datalen, scsi_dl,
						    data_rx, rsp_data_resid);

					}

					/*
					 * If status is good and this is an
					 * inquiry request and the amount of
					 * data requested >= 128 bytes, but
					 * only 128 bytes were received,
					 * then we must fix the issue.
					 */
					else if ((scsi_opcode ==
					    SCSI_INQUIRY) &&
					    (pkt->pkt_datalen >= 128) &&
					    (scsi_dl >= 128) &&
					    (data_rx == 128)) {
						fix_it = 1;

						EMLXS_MSGF(EMLXS_CONTEXT, msg,
						    "Underrun(3). Fixed. "
						    "did=0x%06x sbp=%p "
						    "cmd=%02x dl=%d,%d "
						    "rx=%d rsp=%d",
						    did, sbp, scsi_opcode,
						    pkt->pkt_datalen, scsi_dl,
						    data_rx, rsp_data_resid);

					}

				}
#endif /* FCP_UNDERRUN_PATCH2 */

				/*
				 * Check if SCSI response payload should be
				 * fixed or if a DATA_UNDERRUN should be
				 * reported
				 */
				if (fix_it) {
					/*
					 * Fix the SCSI response payload itself
					 */
					rsp->fcp_u.fcp_status.resid_under = 1;
					rsp->fcp_resid =
					    SWAP_DATA32(pkt->pkt_data_resid);
				} else {
					/*
					 * Change the status from
					 * IOSTAT_FCP_RSP_ERROR to
					 * IOSTAT_DATA_UNDERRUN
					 */
					iostat = IOSTAT_DATA_UNDERRUN;
					pkt->pkt_data_resid =
					    pkt->pkt_datalen;
				}
			}

			/*
			 * If the residual being reported by the adapter is
			 * less than the residual being reported in the reply,
			 * then we have a true overrun. Since we don't know
			 * where the extra data came from or went to then we
			 * cannot trust anything we received
			 */
			else if (rsp_data_resid > pkt->pkt_data_resid) {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_OVERRUN
				 */
				iostat = IOSTAT_DATA_OVERRUN;
				pkt->pkt_data_resid = pkt->pkt_datalen;
			}
		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */

			/* Report whatever the target reported */
			pkt->pkt_data_resid = rsp_data_resid;
		}
	}

	/*
	 * If pkt is tagged for timeout then set the return codes
	 * appropriately
	 */
	if (sbp->pkt_flags & PACKET_IN_TIMEOUT) {
		iostat = IOSTAT_LOCAL_REJECT;
		localstat = IOERR_ABORT_TIMEOUT;
		goto done;
	}

	/* If pkt is tagged for abort then set the return codes appropriately */
	if (sbp->pkt_flags & (PACKET_IN_FLUSH | PACKET_IN_ABORT)) {
		iostat = IOSTAT_LOCAL_REJECT;
		localstat = IOERR_ABORT_REQUESTED;
		goto done;
	}

	/* Print completion message */
	switch (iostat) {
	case IOSTAT_SUCCESS:
		/* Build SCSI GOOD status */
		if (pkt->pkt_rsplen) {
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
		break;

	case IOSTAT_FCP_RSP_ERROR:
		break;

	case IOSTAT_REMOTE_STOP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_LOCAL_REJECT:
		localstat = cmd->un.grsp.perr.statLocalError;

		switch (localstat) {
		case IOERR_SEQUENCE_TIMEOUT:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. "
			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, pkt->pkt_timeout);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode);
		}

		break;

	case IOSTAT_NPORT_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_NPORT_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
#endif

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port, NULL);
#endif

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_INTERMED_RSP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
		    sbp, scsi_opcode);
		break;

	case IOSTAT_LS_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_DATA_UNDERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_DATA_OVERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
		    scsi_opcode);
		break;
	}

done:

	if (iostat == IOSTAT_SUCCESS) {
		HBASTATS.FcpGood++;
	} else {
		HBASTATS.FcpError++;
	}

	mutex_exit(&sbp->mtx);

	emlxs_pkt_complete(sbp, iostat, localstat, 0);

	return;

} /* emlxs_handle_fcp_event() */



/*
 * emlxs_post_buffer
 *
 * This routine will post count buffers to the
 * ring with the QUE_RING_BUF_CN command. This
 * allows 2 buffers / command to be posted.
 * Returns the number of buffers NOT posted.
 */
extern int
emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
{
	emlxs_port_t *port = &PPORT;
	IOCB *icmd;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	uint16_t tag;
	uint32_t maxqbuf;
	int32_t i;
	int32_t j;
	uint32_t seg;
	uint32_t size;

	mp = 0;
	maxqbuf = 2;
	tag = (uint16_t)cnt;
	cnt += rp->fc_missbufcnt;	/* include buffers still owed */

	/* Select the buffer pool and buffer size for this ring */
	if (rp->ringno == FC_ELS_RING) {
		seg = MEM_BUF;
		size = MEM_ELSBUF_SIZE;
	} else if (rp->ringno == FC_IP_RING) {
		seg = MEM_IPBUF;
		size = MEM_IPBUF_SIZE;
	} else if (rp->ringno == FC_CT_RING) {
		seg = MEM_CTBUF;
		size = MEM_CTBUF_SIZE;
	}
#ifdef SFCT_SUPPORT
	else if (rp->ringno == FC_FCT_RING) {
		seg = MEM_FCTBUF;
		size = MEM_FCTBUF_SIZE;
	}
#endif /* SFCT_SUPPORT */
	else {
		/* Unknown ring: nothing to post */
		return (0);
	}

	/*
	 * While there are buffers to post
	 */
	while (cnt) {
		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
			/* Remember the shortfall for a later retry */
			rp->fc_missbufcnt = cnt;
			return (cnt);
		}

		iocbq->ring = (void *)rp;
		iocbq->port = (void *)port;
		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		icmd = &iocbq->iocb;

		/*
		 * Max buffers can be posted per command
		 */
		for (i = 0; i < maxqbuf; i++) {
			if (cnt <= 0)
				break;

			/* fill in BDEs for command */
			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg)) == 0) {
				/*
				 * Out of buffers: unwind the BDEs already
				 * filled into this IOCB and return them to
				 * the pool, then release the IOCB itself.
				 */
				icmd->ulpBdeCount = i;
				for (j = 0; j < i; j++) {
					mp = EMLXS_GET_VADDR(hba, rp, icmd);
					if (mp) {
						(void) emlxs_mem_put(hba, seg,
						    (uint8_t *)mp);
					}
				}

				rp->fc_missbufcnt = cnt + i;

				(void) emlxs_mem_put(hba, MEM_IOCB,
				    (uint8_t *)iocbq);

				return (cnt + i);
			}

			/*
			 * map that page and save the address pair for lookup
			 * later
			 */
			emlxs_mem_map_vaddr(hba,
			    rp,
			    mp,
			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
			    (uint32_t *)&icmd->un.cont64[i].addrLow);

			icmd->un.cont64[i].tus.f.bdeSize = size;
			icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;

			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			 * "UB Post: ring=%d addr=%08x%08x size=%d",
			 * rp->ringno, icmd->un.cont64[i].addrHigh,
			 * icmd->un.cont64[i].addrLow, size);
			 */

			cnt--;
		}

		icmd->ulpIoTag = tag;
		icmd->ulpBdeCount = i;
		icmd->ulpLe = 1;
		icmd->ulpOwner = OWN_CHIP;
		/* used for delimiter between commands */
		iocbq->bp = (uint8_t *)mp;

		emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
	}

	rp->fc_missbufcnt = 0;

	return (0);

} /* emlxs_post_buffer() */


/*
 * emlxs_port_offline
 *
 * Take the port (or the part of the fabric identified by 'scope')
 * offline: notify the ULP, stop authentication, and flush/close the
 * affected nodes.  'scope' is interpreted as an fc_affected_id_t whose
 * format byte selects how much of the D_ID space is affected (port,
 * area, domain, network, virtual-link-down 0xfe, or link-down 0xff).
 * How aggressively FCP I/O is flushed depends on the configured ADISC
 * support level.  Always returns 0.
 */
extern int
emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg;
	NODELIST *nlp;
	fc_affected_id_t *aid;
	uint32_t mask;
	uint32_t aff_d_id;
	uint32_t linkdown;
	uint32_t vlinkdown;
	uint32_t action;
	int i;
	uint32_t unreg_vpi;
	uint32_t update;
	uint32_t adisc_support;

	/* Target mode only uses this routine for linkdowns */
	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
		return (0);
	}

	cfg = &CFG;
	aid = (fc_affected_id_t *)&scope;
	linkdown = 0;
	vlinkdown = 0;
	unreg_vpi = 0;
	update = 0;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (0);
	}

	/* Build the D_ID compare mask from the affected-ID format */
	switch (aid->aff_format) {
	case 0:	/* Port */
		mask = 0x00ffffff;
		break;

	case 1:	/* Area */
		mask = 0x00ffff00;
		break;

	case 2:	/* Domain */
		mask = 0x00ff0000;
		break;

	case 3:	/* Network */
		mask = 0x00000000;
		break;

#ifdef DHCHAP_SUPPORT
	case 0xfe:	/* Virtual link down */
		mask = 0x00000000;
		vlinkdown = 1;
		break;
#endif /* DHCHAP_SUPPORT */

	case 0xff:	/* link is down */
		mask = 0x00000000;
		linkdown = 1;
		break;

	}

	aff_d_id = aid->aff_d_id & mask;


	/*
	 * If link is down then this is a hard shutdown and flush
	 * If link not down then this is a soft shutdown and flush
	 * (e.g. RSCN)
	 */
	if (linkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
		port->prev_did = port->did;
		port->did = 0;

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;
			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, NULL);
				}

				if (port->tgt_mode) {
#ifdef SFCT_SUPPORT
					emlxs_fct_link_down(port);
#endif /* SFCT_SUPPORT */

				} else if (port->ini_mode) {
					port->ulp_statec_cb(port->ulp_handle,
					    FC_STATE_OFFLINE);
				}
			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, "*");
				}
			}


		}

		unreg_vpi = 1;

#ifdef DHCHAP_SUPPORT
		/* Stop authentication with all nodes */
		emlxs_dhc_auth_stop(port, NULL);
#endif /* DHCHAP_SUPPORT */

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);

		/* Flush any pending ub buffers */
		emlxs_ub_flush(port);
	}
#ifdef DHCHAP_SUPPORT
	/* virtual link down */
	else if (vlinkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;
			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed.");
				}

#ifdef SFCT_SUPPORT
				if (port->tgt_mode) {
					emlxs_fct_link_down(port);

				} else if (port->ini_mode) {
					port->ulp_statec_cb(port->ulp_handle,
					    FC_STATE_OFFLINE);
				}
#else
				port->ulp_statec_cb(port->ulp_handle,
				    FC_STATE_OFFLINE);
#endif /* SFCT_SUPPORT */
			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed. *");
				}
			}


		}

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
	}
#endif /* DHCHAP_SUPPORT */

	if (port->tgt_mode) {
		goto done;
	}

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	if (hba->flag & FC_ONLINE_MODE) {
		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
	} else {
		adisc_support = 0;
	}

	/* Check ADISC support level */
	switch (adisc_support) {
	case 0:	/* No support - Flush all IO to all matching nodes */

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * emlxs_mb_unreg_did and the flush routines enter the
			 * same locks. Also, when we release the lock the list
			 * can change out from under us.
			 */

			/* Find first node */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for any device that matches
					 * our mask
					 */
					if ((nlp->nlp_DID & mask) == aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */

							action = 2;
							break;
						}
					}
					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);


			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
				    NULL, NULL, NULL);
			} else if (action == 2) {
#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
				emlxs_node_close(port, nlp, FC_IP_RING, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
			}

		}

		break;

	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */

		for (;;) {

			/*
			 * We need to hold the locks this way because
			 * emlxs_mb_unreg_did and the flush routines enter the
			 * same locks. Also, when we release the lock the list
			 * can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for special FCP2 target device
					 * that matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp-> nlp_fcp_info &
					    NLP_FCP_2_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */

							action = 2;
							break;
						}
					}

					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
				    NULL, NULL, NULL);
			} else if (action == 2) {
#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
				emlxs_node_close(port, nlp, FC_IP_RING, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				unreg_vpi = 0;

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp, FC_FCP_RING, -1);
				emlxs_node_close(port, nlp, FC_IP_RING,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->ring[FC_CT_RING], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->ring[FC_ELS_RING], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->ring[FC_IP_RING], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_CT_RING], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_ELS_RING], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_IP_RING], nlp, 0);
			}
		}
		break;

	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */

		if (!linkdown && !vlinkdown) {
			break;
		}

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * emlxs_mb_unreg_did and the flush routines enter the
			 * same locks. Also, when we release the lock the list
			 * can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for FCP target device that
					 * matches our mask
					 */
					if ((nlp-> nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */

							action = 2;
							break;
						}
					}

					nlp = nlp->nlp_list_next;
				}
				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
				emlxs_node_close(port, nlp, FC_IP_RING, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				unreg_vpi = 0;

				/*
				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp, FC_FCP_RING, -1);
				emlxs_node_close(port, nlp, FC_IP_RING,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->ring[FC_CT_RING], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->ring[FC_ELS_RING], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->ring[FC_IP_RING], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_CT_RING], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_ELS_RING], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_IP_RING], nlp, 0);
			}
		}

		break;

	}	/* switch() */

done:

	if (unreg_vpi) {
		(void) emlxs_mb_unreg_vpi(port);
	}

	return (0);

} /* emlxs_port_offline() */


/*
 * emlxs_port_online
 *
 * Bring a (possibly virtual) port online after a link-up: compute the
 * new ULP state (topology and link-speed bits), report the link up,
 * notify the ULP or FCT layer, wake any thread waiting on the link-up
 * CV, and flush pending unsolicited buffers.
 */
extern void
emlxs_port_online(emlxs_port_t *vport)
{
	emlxs_hba_t *hba = vport->hba;
	emlxs_port_t *port = &PPORT;
	uint32_t state;
	uint32_t update;
	uint32_t npiv_linkup;
	char topology[32];
	char linkspeed[32];
	char mode[32];

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
	 * "linkup_callback.
	 * vpi=%d fc_flag=%x", vport->vpi, hba->flag);
	 */

	/* NPIV ports only come online when NPIV is enabled and supported */
	if ((vport->vpi > 0) &&
	    (!(hba->flag & FC_NPIV_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED))) {
		return;
	}

	if (!(vport->flag & EMLXS_PORT_BOUND) ||
	    !(vport->flag & EMLXS_PORT_ENABLE)) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for mode */
	if (port->tgt_mode) {
		(void) strcpy(mode, ", target");
	} else if (port->ini_mode) {
		(void) strcpy(mode, ", initiator");
	} else {
		(void) strcpy(mode, "");
	}

	/* Check for loop topology */
	if (hba->topology == TOPOLOGY_LOOP) {
		state = FC_STATE_LOOP;
		(void) strcpy(topology, ", loop");
	} else {
		state = FC_STATE_ONLINE;
		(void) strcpy(topology, ", fabric");
	}

	/* Set the link speed (OR the speed bits into the ULP state) */
	switch (hba->linkspeed) {
	case 0:
		(void) strcpy(linkspeed, "Gb");
		state |= FC_STATE_1GBIT_SPEED;
		break;

	case LA_1GHZ_LINK:
		(void) strcpy(linkspeed, "1Gb");
		state |= FC_STATE_1GBIT_SPEED;
		break;
	case LA_2GHZ_LINK:
		(void) strcpy(linkspeed, "2Gb");
		state |= FC_STATE_2GBIT_SPEED;
		break;
	case LA_4GHZ_LINK:
		(void) strcpy(linkspeed, "4Gb");
		state |= FC_STATE_4GBIT_SPEED;
		break;
	case LA_8GHZ_LINK:
		(void) strcpy(linkspeed, "8Gb");
		state |= FC_STATE_8GBIT_SPEED;
		break;
	case LA_10GHZ_LINK:
		(void) strcpy(linkspeed, "10Gb");
		state |= FC_STATE_10GBIT_SPEED;
		break;
	default:
		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
		break;
	}

	npiv_linkup = 0;
	update = 0;

	/* Only notify the ULP when the state actually changes */
	if ((hba->state >= FC_LINK_UP) &&
	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
		update = 1;
		vport->ulp_statec = state;

		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
			hba->flag |= FC_NPIV_LINKUP;
			npiv_linkup = 1;
		}
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
	 * "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x
	 * statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
	 * hba->state, vport->ulp_statec);
	 */
	if (update) {
		if (vport->flag & EMLXS_PORT_BOUND) {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s", linkspeed, topology, mode);
			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s",
				    linkspeed, topology, mode);
			}

			if (vport->tgt_mode) {
#ifdef SFCT_SUPPORT
				emlxs_fct_link_up(vport);
#endif /* SFCT_SUPPORT */
			} else if (vport->ini_mode) {
				vport->ulp_statec_cb(vport->ulp_handle,
				    state);
			}
		} else {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s *", linkspeed, topology, mode);
			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s *",
				    linkspeed, topology, mode);
			}
		}

		/* Check for waiting threads */
		if (vport->vpi == 0) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			if (hba->linkup_wait_flag == TRUE) {
				hba->linkup_wait_flag = FALSE;
				cv_broadcast(&EMLXS_LINKUP_CV);
			}
			mutex_exit(&EMLXS_LINKUP_LOCK);
		}

		/* Flush any pending ub buffers */
		emlxs_ub_flush(vport);
	}

	return;

} /* emlxs_port_online() */


extern void
emlxs_linkdown(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int i;

	mutex_enter(&EMLXS_PORT_LOCK);

	HBASTATS.LinkDown++;
	emlxs_ffstate_change_locked(hba, FC_LINK_DOWN);

	/* Filter hba flags */
	hba->flag &= FC_LINKDOWN_MASK;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* 0xffffffff scope == take everything on this port offline */
		(void) emlxs_port_offline(port, 0xffffffff);

	}

	return;

} /* emlxs_linkdown() */


/*
 * emlxs_linkup
 *
 * Record a link-up: bump stats, move the HBA state machine to FC_LINK_UP
 * and arm the linkup and discovery watchdog timers.  In Menlo mode only
 * wakes any linkup waiter; no timers are started.
 */
extern void
emlxs_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	HBASTATS.LinkUp++;
	emlxs_ffstate_change_locked(hba, FC_LINK_UP);

#ifdef MENLO_SUPPORT
	if (hba->flag & FC_MENLO_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		/*
		 * Trigger linkup CV and don't start linkup & discovery
		 * timers
		 */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		cv_broadcast(&EMLXS_LINKUP_CV);
		mutex_exit(&EMLXS_LINKUP_LOCK);

		return;
	}
#endif /* MENLO_SUPPORT */

	/* Set the linkup & discovery timers (units: driver timer tics) */
	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
	hba->discovery_timer =
	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
	    cfg[CFG_DISC_TIMEOUT].current;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_linkup() */


/*
 * emlxs_reset_link
 *
 * Description:
 *	Called to reset the link.  If "linkup" is nonzero an INIT_LINK
 *	mailbox command is issued to bring the link up (clearing loopback
 *	mode first); otherwise a DOWN_LINK command holds the link down.
 *
 * Returns:
 *	0 on success, 1 if no mailbox buffer could be allocated.
 */
extern int
emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MAILBOX *mb;

	/*
	 * Get a buffer to use for the mailbox command
	 */
	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
		    "Unable to allocate mailbox buffer.");

		return (1);
	}

	cfg = &CFG;

	if (linkup) {
		/*
		 * Setup and issue mailbox INITIALIZE LINK command
		 */

		emlxs_mb_init_link(hba,
		    (MAILBOX *) mb,
		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
		mb->un.varInitLnk.lipsr_AL_PA = 0;

		/* Clear the loopback mode */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag &= ~FC_LOOPBACK_MODE;
		hba->loopback_tics = 0;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* On anything but MBX_BUSY the mailbox buffer is ours again */
		if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
		    0) != MBX_BUSY) {
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);

	} else {	/* hold link down */

		emlxs_mb_down_link(hba, (MAILBOX *)mb);

		if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
		    0) != MBX_BUSY) {
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Disabling link...");
	}

	return (0);

} /* emlxs_reset_link() */


/*
 * emlxs_online
 *
 * Bring the adapter online.  Waits up to 30 seconds for the adapter to
 * reach FC_OFFLINE_MODE, marks it FC_ONLINING_MODE, runs firmware init
 * (emlxs_ffinit) and starts the driver timer.  Returns 0 if already
 * online/onlining or on success; otherwise the emlxs_ffinit status.
 */
extern int
emlxs_online(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int32_t rval = 0;
	uint32_t i = 0;

	/* Make sure adapter is offline or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going online (lockless peek) */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again (now under the lock) */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is offline */
		if (hba->flag & FC_OFFLINE_MODE) {
			/* Mark it going online */
			hba->flag &= ~FC_OFFLINE_MODE;
			hba->flag |= FC_ONLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		DELAYMS(1000);
	}

	/*
	 * NOTE(review): if the 30s loop expires without ever seeing
	 * FC_OFFLINE_MODE we fall through and attempt init anyway -
	 * confirm this is the intended behavior.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going online...");

	if (rval = emlxs_ffinit(hba)) {	/* assignment intended */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
		    rval);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

		/* Init failed: set FC_OFFLINE_MODE and bail */
		mutex_enter(&EMLXS_PORT_LOCK);
		emlxs_diag_state = DDI_OFFDI;
		hba->flag |= FC_OFFLINE_MODE;
		hba->flag &= ~FC_ONLINING_MODE;
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);
	}

	/* Start the timer */
	emlxs_timer_start(hba);

	/* Set FC_ONLINE_MODE */
	mutex_enter(&EMLXS_PORT_LOCK);
	emlxs_diag_state = DDI_ONDI;
	hba->flag |= FC_ONLINE_MODE;
	hba->flag &= ~FC_ONLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);

#ifdef SFCT_SUPPORT
	(void) emlxs_fct_port_initialize(port);
#endif /* SFCT_SUPPORT */

	return (rval);

} /* emlxs_online() */


/*
 * emlxs_offline
 *
 * Take the adapter offline.  Waits up to 30 seconds for FC_ONLINE_MODE,
 * marks FC_OFFLINING_MODE, flushes all I/O (link down for initiator
 * mode, FCT shutdown for target mode), unregisters nodes, stops the
 * timer, flushes iotag tables, takes the SLI layer offline and frees
 * shared memory.  Returns 0 on completion (also when already offline).
 */
extern int
emlxs_offline(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i = 0;
	int rval = 1;

	/* Make sure adapter is online or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going offline (lockless) */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again (now under the lock) */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is online */
		if (hba->flag & FC_ONLINE_MODE) {
			/* Mark it going offline */
			hba->flag &= ~FC_ONLINE_MODE;
			hba->flag |= FC_OFFLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		DELAYMS(1000);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going offline...");

	if (port->ini_mode) {
		/* Flush all IO */
		emlxs_linkdown(hba);

	}
#ifdef SFCT_SUPPORT
	else {
		(void)
		    emlxs_fct_port_shutdown(port);
	}
#endif /* SFCT_SUPPORT */

	/* Check if adapter was shutdown */
	if (hba->flag & FC_HARDWARE_ERROR) {
		/*
		 * Force mailbox cleanup
		 * This will wake any sleeping or polling threads
		 */
		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
	}

	/* Pause here for the IO to settle */
	delay(drv_usectohz(1000000));	/* 1 sec */

	/* Unregister all nodes */
	emlxs_ffcleanup(hba);


	if (hba->bus_type == SBUS_FC) {
		/* 0x9A: SBUS shim status/reset value - magic from HW spec */
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba, hba->sbus_csr_addr),
		    0x9A);
	}

	/* Stop the timer */
	emlxs_timer_stop(hba);

	/* For safety flush every iotag list */
	if (emlxs_iotag_flush(hba)) {
		/* Pause here for the IO to flush */
		delay(drv_usectohz(1000));
	}

	/* Wait for poll command request to settle */
	while (hba->io_poll_count > 0) {
		delay(drv_usectohz(2000000));	/* 2 sec */
	}

	emlxs_sli_offline(hba);

	/* Free all the shared memory */
	(void) emlxs_mem_free_buffer(hba);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~FC_OFFLINING_MODE;
	emlxs_diag_state = DDI_OFFDI;
	mutex_exit(&EMLXS_PORT_LOCK);

	rval = 0;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

done:	/* NOTE(review): label currently unreferenced; kept for safety */

	return (rval);

} /* emlxs_offline() */



/*
 * emlxs_power_down
 *
 * Suspend-path helper: take the adapter offline, snapshot PCI config
 * space into hba->pm_config, then put the chip into the D3 power state.
 * Returns 0 on success or the emlxs_offline() status on failure.
 */
extern int
emlxs_power_down(emlxs_hba_t *hba)
{
	int32_t rval = 0;
	uint32_t *ptr;
	uint32_t i;

	if ((rval = emlxs_offline(hba))) {
		return (rval);
	}

	/* Save pci config space (PCI_CONFIG_SIZE bytes, 32 bits at a time) */
	ptr = (uint32_t *)hba->pm_config;
	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
		*ptr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + i));
	}

	/* Put chip in D3 state */
	(void) ddi_put8(hba->pci_acc_handle,
	    (uint8_t *)(hba->pci_addr +
PCI_PM_CONTROL_REGISTER), 1771 (uint8_t)PCI_PM_D3_STATE); 1772 1773 return (0); 1774 1775 } /* End emlxs_power_down */ 1776 1777 1778 extern int 1779 emlxs_power_up(emlxs_hba_t *hba) 1780 { 1781 int32_t rval = 0; 1782 uint32_t *ptr; 1783 uint32_t i; 1784 1785 1786 /* Take chip out of D3 state */ 1787 (void) ddi_put8(hba->pci_acc_handle, 1788 (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER), 1789 (uint8_t)PCI_PM_D0_STATE); 1790 1791 /* Must have at least 10 ms delay here */ 1792 DELAYMS(100); 1793 1794 /* Restore pci config space */ 1795 ptr = (uint32_t *)hba->pm_config; 1796 for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) { 1797 (void) ddi_put32(hba->pci_acc_handle, 1798 (uint32_t *)(hba->pci_addr + i), *ptr); 1799 } 1800 1801 /* Bring adapter online */ 1802 if ((rval = emlxs_online(hba))) { 1803 (void) ddi_put8(hba->pci_acc_handle, 1804 (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER), 1805 (uint8_t)PCI_PM_D3_STATE); 1806 1807 return (rval); 1808 } 1809 1810 return (rval); 1811 1812 } /* End emlxs_power_up */ 1813 1814 1815 /* 1816 * 1817 * NAME: emlxs_ffcleanup 1818 * 1819 * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter 1820 * 1821 * EXECUTION ENVIRONMENT: process only 1822 * 1823 * CALLED FROM: CFG_TERM 1824 * 1825 * INPUT: hba - pointer to the dev_ctl area. 
1826 * 1827 * RETURNS: none 1828 */ 1829 extern void 1830 emlxs_ffcleanup(emlxs_hba_t *hba) 1831 { 1832 emlxs_port_t *port = &PPORT; 1833 uint32_t i; 1834 1835 /* Disable all but the mailbox interrupt */ 1836 emlxs_disable_intr(hba, HC_MBINT_ENA); 1837 1838 /* Make sure all port nodes are destroyed */ 1839 for (i = 0; i < MAX_VPORTS; i++) { 1840 port = &VPORT(i); 1841 1842 if (port->node_count) { 1843 (void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0); 1844 } 1845 } 1846 1847 /* Clear all interrupt enable conditions */ 1848 emlxs_disable_intr(hba, 0); 1849 1850 return; 1851 1852 } /* emlxs_ffcleanup() */ 1853 1854 1855 extern uint16_t 1856 emlxs_register_pkt(RING *rp, emlxs_buf_t *sbp) 1857 { 1858 emlxs_hba_t *hba; 1859 emlxs_port_t *port; 1860 uint16_t iotag; 1861 uint32_t i; 1862 1863 hba = rp->hba; 1864 1865 mutex_enter(&EMLXS_FCTAB_LOCK(rp->ringno)); 1866 1867 if (sbp->iotag != 0) { 1868 port = &PPORT; 1869 1870 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, 1871 "Pkt already registered! 
ringo=%d iotag=%d sbp=%p", 1872 sbp->ring, sbp->iotag, sbp); 1873 } 1874 1875 iotag = 0; 1876 for (i = 0; i < rp->max_iotag; i++) { 1877 if (!rp->fc_iotag || rp->fc_iotag >= rp->max_iotag) { 1878 rp->fc_iotag = 1; 1879 } 1880 iotag = rp->fc_iotag++; 1881 1882 if (rp->fc_table[iotag] == 0 || 1883 rp->fc_table[iotag] == STALE_PACKET) { 1884 hba->io_count[rp->ringno]++; 1885 rp->fc_table[iotag] = sbp; 1886 1887 sbp->iotag = iotag; 1888 sbp->ring = rp; 1889 1890 break; 1891 } 1892 iotag = 0; 1893 } 1894 1895 mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno)); 1896 1897 /* 1898 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, 1899 * "emlxs_register_pkt: ringo=%d iotag=%d sbp=%p", 1900 * rp->ringno, iotag, sbp); 1901 */ 1902 1903 return (iotag); 1904 1905 } /* emlxs_register_pkt() */ 1906 1907 1908 1909 extern emlxs_buf_t * 1910 emlxs_unregister_pkt(RING *rp, uint16_t iotag, uint32_t forced) 1911 { 1912 emlxs_hba_t *hba; 1913 emlxs_buf_t *sbp; 1914 uint32_t ringno; 1915 1916 /* Check the iotag range */ 1917 if ((iotag == 0) || (iotag >= rp->max_iotag)) { 1918 return (NULL); 1919 } 1920 1921 sbp = NULL; 1922 hba = rp->hba; 1923 ringno = rp->ringno; 1924 1925 /* Remove the sbp from the table */ 1926 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 1927 sbp = rp->fc_table[iotag]; 1928 1929 if (!sbp || (sbp == STALE_PACKET)) { 1930 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 1931 return (sbp); 1932 } 1933 1934 rp->fc_table[iotag] = ((forced) ? 
	    STALE_PACKET : NULL);
	hba->io_count[ringno]--;
	sbp->iotag = 0;

	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));


	/* Clean up the sbp */
	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		sbp->pkt_flags &= ~PACKET_IN_TXQ;
		hba->ring_tx_count[ringno]--;
	}

	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
	}

	if (sbp->bmp) {
		/* Return the buffer pointer list to the pool */
		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
		sbp->bmp = 0;
	}

	mutex_exit(&sbp->mtx);

	return (sbp);

} /* emlxs_unregister_pkt() */



/*
 * Flush all IO's to all nodes for a given ring.
 *
 * Splices every node's priority and normal tx queues for this ring onto
 * a local abort queue (under EMLXS_RINGTX_LOCK), empties the ring's
 * node service queue, then completes/frees each queued iocb outside the
 * lock.  "fpkt", when non-NULL, accumulates a flush_count so the caller
 * can wait for all flushed packets.  Returns the number flushed.
 */
extern uint32_t
emlxs_tx_ring_flush(emlxs_hba_t *hba, RING *rp, emlxs_buf_t *fpkt)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCB *iocb;
	uint32_t ringno;
	Q abort;
	NODELIST *ndlp;
	IOCB *icmd;
	MATCHMAP *mp;
	uint32_t i;

	ringno = rp->ringno;
	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_RINGTX_LOCK);

	/* While a node needs servicing */
	while (rp->nodeq.q_first) {
		ndlp = (NODELIST *) rp->nodeq.q_first;

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[ringno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first = ndlp->nlp_ptx[ringno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
			}

			abort.q_last = ndlp->nlp_ptx[ringno].q_last;
			abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[ringno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first = ndlp->nlp_tx[ringno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[ringno].q_last;
			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;

		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[ringno].q_first = NULL;
		ndlp->nlp_ptx[ringno].q_last = NULL;
		ndlp->nlp_ptx[ringno].q_cnt = 0;

		ndlp->nlp_tx[ringno].q_first = NULL;
		ndlp->nlp_tx[ringno].q_last = NULL;
		ndlp->nlp_tx[ringno].q_cnt = 0;

		/* Remove node from service queue */

		/* If this is the last node on list */
		if (rp->nodeq.q_last == (void *)ndlp) {
			rp->nodeq.q_last = NULL;
			rp->nodeq.q_first = NULL;
			rp->nodeq.q_cnt = 0;
		} else {
			/* Remove node from head (nodeq is circular) */
			rp->nodeq.q_first = ndlp->nlp_next[ringno];
			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
			    rp->nodeq.q_first;
			rp->nodeq.q_cnt--;
		}

		/* Clear node */
		ndlp->nlp_next[ringno] = NULL;
	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *) abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);

			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	} /* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	/* Now abort the iocb's */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;
			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ulpCommand !=
					    CMD_QUE_RING_LIST64_CN) {
						uint8_t *tmp;

						for (i = 0;
						    i < icmd->ulpBdeCount;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (uint8_t *)mp;
							if (mp) {
							(void) emlxs_mem_put(
							    hba, MEM_BUF, tmp);
							}
						}
					}

					(void) emlxs_mem_put(hba, MEM_IOCB,
					    (uint8_t *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					emlxs_sli_issue_iocb_cmd(hba, rp,
					    iocbq);
				}
			}
		}

		iocbq = next;

	} /* end of while */

	return (abort.q_cnt);

} /*
emlxs_tx_ring_flush() */


/*
 * Flush all IO's on all or a given ring for a given node.
 *
 * "ring" may be NULL, meaning flush this node's queues on every ring.
 * Queued iocbs are spliced onto a local abort queue under
 * EMLXS_RINGTX_LOCK, the node is unlinked from each ring's service
 * queue, and the iocbs are completed/freed outside the lock.  Returns
 * the number of iocbs flushed.
 */
extern uint32_t
emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, RING *ring,
    uint32_t shutdown, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t ringno;
	RING *rp;
	IOCB *icmd;
	IOCBQ *iocbq;
	NODELIST *prev;
	IOCBQ *next;
	IOCB *iocb;
	Q abort;
	uint32_t i;
	MATCHMAP *mp;


	bzero((void *)&abort, sizeof (Q));

	/* Flush all I/O's on tx queue to this target */
	mutex_enter(&EMLXS_RINGTX_LOCK);

	if (!ndlp->nlp_base && shutdown) {
		ndlp->nlp_active = 0;
	}

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		if (ring && rp != ring) {
			continue;
		}

		if (!ndlp->nlp_base || shutdown) {
			/* Check if priority queue is not empty */
			if (ndlp->nlp_ptx[ringno].q_first) {
				/* Transfer all iocb's to local queue */
				if (abort.q_first == 0) {
					abort.q_first =
					    ndlp->nlp_ptx[ringno].q_first;
				} else {
					((IOCBQ *)abort.q_last)->next =
					    (IOCBQ *)ndlp->nlp_ptx[ringno].
					    q_first;
				}

				abort.q_last = ndlp->nlp_ptx[ringno].q_last;
				abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
			}
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[ringno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first = ndlp->nlp_tx[ringno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[ringno].q_last;
			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[ringno].q_first = NULL;
		ndlp->nlp_ptx[ringno].q_last = NULL;
		ndlp->nlp_ptx[ringno].q_cnt = 0;

		ndlp->nlp_tx[ringno].q_first = NULL;
		ndlp->nlp_tx[ringno].q_last = NULL;
		ndlp->nlp_tx[ringno].q_cnt = 0;

		/* If this node was on the ring queue, remove it */
		if (ndlp->nlp_next[ringno]) {
			/* If this is the only node on list */
			if (rp->nodeq.q_first == (void *)ndlp &&
			    rp->nodeq.q_last == (void *)ndlp) {
				rp->nodeq.q_last = NULL;
				rp->nodeq.q_first = NULL;
				rp->nodeq.q_cnt = 0;
			} else if (rp->nodeq.q_first == (void *)ndlp) {
				rp->nodeq.q_first = ndlp->nlp_next[ringno];
				((NODELIST *) rp->nodeq.q_last)->
				    nlp_next[ringno] = rp->nodeq.q_first;
				rp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult find the
				 * previous node in the circular ring queue
				 */
				prev = ndlp;
				while (prev->nlp_next[ringno] != ndlp) {
					prev = prev->nlp_next[ringno];
				}

				prev->nlp_next[ringno] =
				    ndlp->nlp_next[ringno];

				if (rp->nodeq.q_last == (void *)ndlp) {
					rp->nodeq.q_last = (void *)prev;
				}
				rp->nodeq.q_cnt--;

			}

			/* Clear node */
			ndlp->nlp_next[ringno] = NULL;
		}

	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *) abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				/*
				 * FIX: this used to read ring->ringno, but
				 * "ring" is legally NULL when flushing all
				 * rings (see the "ring &&" test above), and
				 * the abort queue can hold iocbs from
				 * several rings.  Use the ring recorded on
				 * the iocbq instead.
				 */
				hba->ring_tx_count[
				    ((RING *)iocbq->ring)->ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *) iocbq->next;

	} /* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	/* Now abort the iocb's outside the locks */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;
			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ulpCommand !=
					    CMD_QUE_RING_LIST64_CN) {
						uint8_t *tmp;

						for (i = 0;
						    i <
icmd->ulpBdeCount; 2347 i++) { 2348 mp = EMLXS_GET_VADDR( 2349 hba, rp, icmd); 2350 2351 tmp = (uint8_t *)mp; 2352 if (mp) { 2353 (void) emlxs_mem_put( 2354 hba, MEM_BUF, tmp); 2355 } 2356 } 2357 } 2358 2359 (void) emlxs_mem_put(hba, MEM_IOCB, 2360 (uint8_t *)iocbq); 2361 } else { 2362 /* repost the unsolicited buffer */ 2363 emlxs_sli_issue_iocb_cmd(hba, rp, 2364 iocbq); 2365 } 2366 } 2367 } 2368 2369 iocbq = next; 2370 2371 } /* end of while */ 2372 2373 return (abort.q_cnt); 2374 2375 } /* emlxs_tx_node_flush() */ 2376 2377 2378 /* Check for IO's on all or a given ring for a given node */ 2379 extern uint32_t 2380 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, RING *ring) 2381 { 2382 emlxs_hba_t *hba = HBA; 2383 uint32_t ringno; 2384 RING *rp; 2385 uint32_t count; 2386 2387 count = 0; 2388 2389 /* Flush all I/O's on tx queue to this target */ 2390 mutex_enter(&EMLXS_RINGTX_LOCK); 2391 2392 for (ringno = 0; ringno < hba->ring_count; ringno++) { 2393 rp = &hba->ring[ringno]; 2394 2395 if (ring && rp != ring) { 2396 continue; 2397 } 2398 2399 /* Check if priority queue is not empty */ 2400 if (ndlp->nlp_ptx[ringno].q_first) { 2401 count += ndlp->nlp_ptx[ringno].q_cnt; 2402 } 2403 2404 /* Check if tx queue is not empty */ 2405 if (ndlp->nlp_tx[ringno].q_first) { 2406 count += ndlp->nlp_tx[ringno].q_cnt; 2407 } 2408 2409 } 2410 2411 mutex_exit(&EMLXS_RINGTX_LOCK); 2412 2413 return (count); 2414 2415 } /* emlxs_tx_node_check() */ 2416 2417 2418 2419 /* Flush all IO's on the FCP ring for a given node's lun */ 2420 extern uint32_t 2421 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun, 2422 emlxs_buf_t *fpkt) 2423 { 2424 emlxs_hba_t *hba = HBA; 2425 emlxs_buf_t *sbp; 2426 uint32_t ringno; 2427 IOCBQ *iocbq; 2428 IOCBQ *prev; 2429 IOCBQ *next; 2430 IOCB *iocb; 2431 IOCB *icmd; 2432 Q abort; 2433 uint32_t i; 2434 MATCHMAP *mp; 2435 RING *rp; 2436 2437 ringno = FC_FCP_RING; 2438 rp = &hba->ring[ringno]; 2439 2440 bzero((void *)&abort, sizeof 
(Q)); 2441 2442 /* Flush I/O's on txQ to this target's lun */ 2443 mutex_enter(&EMLXS_RINGTX_LOCK); 2444 2445 /* Scan the priority queue first */ 2446 prev = NULL; 2447 iocbq = (IOCBQ *) ndlp->nlp_ptx[ringno].q_first; 2448 2449 while (iocbq) { 2450 next = (IOCBQ *)iocbq->next; 2451 iocb = &iocbq->iocb; 2452 sbp = (emlxs_buf_t *)iocbq->sbp; 2453 2454 /* Check if this IO is for our lun */ 2455 if (sbp->lun == lun) { 2456 /* Remove iocb from the node's tx queue */ 2457 if (next == 0) { 2458 ndlp->nlp_ptx[ringno].q_last = 2459 (uint8_t *)prev; 2460 } 2461 2462 if (prev == 0) { 2463 ndlp->nlp_ptx[ringno].q_first = 2464 (uint8_t *)next; 2465 } else { 2466 prev->next = next; 2467 } 2468 2469 iocbq->next = NULL; 2470 ndlp->nlp_ptx[ringno].q_cnt--; 2471 2472 /* 2473 * Add this iocb to our local abort Q 2474 * This way we don't hold the RINGTX lock too long 2475 */ 2476 if (abort.q_first) { 2477 ((IOCBQ *)abort.q_last)->next = iocbq; 2478 abort.q_last = (uint8_t *)iocbq; 2479 abort.q_cnt++; 2480 } else { 2481 abort.q_first = (uint8_t *)iocbq; 2482 abort.q_last = (uint8_t *)iocbq; 2483 abort.q_cnt = 1; 2484 } 2485 iocbq->next = NULL; 2486 } else { 2487 prev = iocbq; 2488 } 2489 2490 iocbq = next; 2491 2492 } /* while (iocbq) */ 2493 2494 2495 /* Scan the regular queue */ 2496 prev = NULL; 2497 iocbq = (IOCBQ *)ndlp->nlp_tx[ringno].q_first; 2498 2499 while (iocbq) { 2500 next = (IOCBQ *)iocbq->next; 2501 iocb = &iocbq->iocb; 2502 sbp = (emlxs_buf_t *)iocbq->sbp; 2503 2504 /* Check if this IO is for our lun */ 2505 if (sbp->lun == lun) { 2506 /* Remove iocb from the node's tx queue */ 2507 if (next == 0) { 2508 ndlp->nlp_tx[ringno].q_last = 2509 (uint8_t *)prev; 2510 } 2511 2512 if (prev == 0) { 2513 ndlp->nlp_tx[ringno].q_first = 2514 (uint8_t *)next; 2515 } else { 2516 prev->next = next; 2517 } 2518 2519 iocbq->next = NULL; 2520 ndlp->nlp_tx[ringno].q_cnt--; 2521 2522 /* 2523 * Add this iocb to our local abort Q 2524 * This way we don't hold the RINGTX lock too long 2525 */ 
2526 if (abort.q_first) { 2527 ((IOCBQ *) abort.q_last)->next = iocbq; 2528 abort.q_last = (uint8_t *)iocbq; 2529 abort.q_cnt++; 2530 } else { 2531 abort.q_first = (uint8_t *)iocbq; 2532 abort.q_last = (uint8_t *)iocbq; 2533 abort.q_cnt = 1; 2534 } 2535 iocbq->next = NULL; 2536 } else { 2537 prev = iocbq; 2538 } 2539 2540 iocbq = next; 2541 2542 } /* while (iocbq) */ 2543 2544 /* First cleanup the iocb's while still holding the lock */ 2545 iocbq = (IOCBQ *)abort.q_first; 2546 while (iocbq) { 2547 /* Free the IoTag and the bmp */ 2548 iocb = &iocbq->iocb; 2549 sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0); 2550 2551 if (sbp && (sbp != STALE_PACKET)) { 2552 mutex_enter(&sbp->mtx); 2553 if (sbp->pkt_flags & PACKET_IN_TXQ) { 2554 sbp->pkt_flags &= ~PACKET_IN_TXQ; 2555 hba->ring_tx_count[ringno]--; 2556 } 2557 sbp->pkt_flags |= PACKET_IN_FLUSH; 2558 2559 /* 2560 * If the fpkt is already set, then we will leave it 2561 * alone. This ensures that this pkt is only accounted 2562 * for on one fpkt->flush_count 2563 */ 2564 if (!sbp->fpkt && fpkt) { 2565 mutex_enter(&fpkt->mtx); 2566 sbp->fpkt = fpkt; 2567 fpkt->flush_count++; 2568 mutex_exit(&fpkt->mtx); 2569 } 2570 2571 mutex_exit(&sbp->mtx); 2572 } 2573 2574 iocbq = (IOCBQ *) iocbq->next; 2575 2576 } /* end of while */ 2577 2578 mutex_exit(&EMLXS_RINGTX_LOCK); 2579 2580 /* Now abort the iocb's outside the locks */ 2581 iocbq = (IOCBQ *)abort.q_first; 2582 while (iocbq) { 2583 /* Save the next iocbq for now */ 2584 next = (IOCBQ *)iocbq->next; 2585 2586 /* Unlink this iocbq */ 2587 iocbq->next = NULL; 2588 2589 /* Get the pkt */ 2590 sbp = (emlxs_buf_t *)iocbq->sbp; 2591 2592 if (sbp) { 2593 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg, 2594 "tx: sbp=%p node=%p", sbp, sbp->node); 2595 2596 if (hba->state >= FC_LINK_UP) { 2597 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 2598 IOERR_ABORT_REQUESTED, 1); 2599 } else { 2600 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 2601 IOERR_LINK_DOWN, 1); 2602 } 2603 } 

		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ulpCommand !=
					    CMD_QUE_RING_LIST64_CN) {
						uint8_t *tmp;

						for (i = 0;
						    i < icmd->ulpBdeCount;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (uint8_t *)mp;
							if (mp) {
							(void) emlxs_mem_put(
							    hba, MEM_BUF, tmp);
							}
						}
					}

					(void) emlxs_mem_put(hba, MEM_IOCB,
					    (uint8_t *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					emlxs_sli_issue_iocb_cmd(hba, rp,
					    iocbq);
				}
			}
		}

		iocbq = next;

	} /* end of while */


	return (abort.q_cnt);

} /* emlxs_tx_lun_flush() */


/*
 * emlxs_tx_put
 *
 * Queue an iocb on its node's per-ring transmit queue (priority queue
 * when IOCB_PRIORITY is set).  If the node is inactive or the packet is
 * being aborted, the iocb is completed/freed immediately instead.
 * "lock" indicates whether this routine must take EMLXS_RINGTX_LOCK
 * itself (callers may already hold it).
 */
extern void
emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t ringno;
	NODELIST *nlp;
	RING *rp;
	emlxs_buf_t *sbp;

	port = (emlxs_port_t *)iocbq->port;
	hba = HBA;
	rp = (RING *)iocbq->ring;
	nlp = (NODELIST *)iocbq->node;
	ringno = rp->ringno;
	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (nlp == NULL) {
		/* Set node to base node by default */
		nlp = &port->node_base;

		iocbq->node = (void *)nlp;

		if (sbp) {
			sbp->node = (void *)nlp;
		}
	}

	if (lock) {
		mutex_enter(&EMLXS_RINGTX_LOCK);
	}

	/* Inactive node or aborting packet: fail the iocb right here */
	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
		if (sbp) {
			mutex_enter(&sbp->mtx);

			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			mutex_exit(&sbp->mtx);

			/* Free the ulpIoTag and the bmp */
			(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);

			if (lock) {
				mutex_exit(&EMLXS_RINGTX_LOCK);
			}

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
			return;
		} else {
			if (lock) {
				mutex_exit(&EMLXS_RINGTX_LOCK);
			}

			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
		}

		return;
	}

	if (sbp) {

		mutex_enter(&sbp->mtx);

		/* Already queued or completing elsewhere: nothing to do */
		if (sbp->pkt_flags &
		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
			mutex_exit(&sbp->mtx);
			if (lock) {
				mutex_exit(&EMLXS_RINGTX_LOCK);
			}
			return;
		}

		sbp->pkt_flags |= PACKET_IN_TXQ;
		hba->ring_tx_count[ringno]++;

		mutex_exit(&sbp->mtx);
	}


	/* Check iocbq priority */
	if (iocbq->flag & IOCB_PRIORITY) {
		/* Add the iocb to the bottom of the node's ptx queue */
		if (nlp->nlp_ptx[ringno].q_first) {
			((IOCBQ *)nlp->nlp_ptx[ringno].q_last)->next = iocbq;
			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[ringno].q_cnt++;
		} else {
			nlp->nlp_ptx[ringno].q_first = (uint8_t *)iocbq;
			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[ringno].q_cnt = 1;
		}

		iocbq->next = NULL;
	} else {	/* Normal priority */


		/* Add the iocb to the bottom of the node's tx queue */
		if (nlp->nlp_tx[ringno].q_first) {
			((IOCBQ *)nlp->nlp_tx[ringno].q_last)->next = iocbq;
			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[ringno].q_cnt++;
		} else {
			nlp->nlp_tx[ringno].q_first = (uint8_t *)iocbq;
			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[ringno].q_cnt = 1;
		}

		iocbq->next = NULL;
	}

2775 /* 2776 * Check if the node is not already on ring queue and 2777 * (is not closed or is a priority request) 2778 */ 2779 if (!nlp->nlp_next[ringno] && (!(nlp->nlp_flag[ringno] & NLP_CLOSED) || 2780 (iocbq->flag & IOCB_PRIORITY))) { 2781 /* If so, then add it to the ring queue */ 2782 if (rp->nodeq.q_first) { 2783 ((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] = 2784 (uint8_t *)nlp; 2785 nlp->nlp_next[ringno] = rp->nodeq.q_first; 2786 2787 /* 2788 * If this is not the base node then add it 2789 * to the tail 2790 */ 2791 if (!nlp->nlp_base) { 2792 rp->nodeq.q_last = (uint8_t *)nlp; 2793 } else { /* Otherwise, add it to the head */ 2794 2795 /* The command node always gets priority */ 2796 rp->nodeq.q_first = (uint8_t *)nlp; 2797 } 2798 2799 rp->nodeq.q_cnt++; 2800 } else { 2801 rp->nodeq.q_first = (uint8_t *)nlp; 2802 rp->nodeq.q_last = (uint8_t *)nlp; 2803 nlp->nlp_next[ringno] = nlp; 2804 rp->nodeq.q_cnt = 1; 2805 } 2806 } 2807 2808 HBASTATS.IocbTxPut[ringno]++; 2809 2810 /* Adjust the ring timeout timer */ 2811 rp->timeout = hba->timer_tics + 5; 2812 2813 if (lock) { 2814 mutex_exit(&EMLXS_RINGTX_LOCK); 2815 } 2816 2817 return; 2818 2819 } /* emlxs_tx_put() */ 2820 2821 2822 extern IOCBQ * 2823 emlxs_tx_get(RING *rp, uint32_t lock) 2824 { 2825 emlxs_hba_t *hba; 2826 uint32_t ringno; 2827 IOCBQ *iocbq; 2828 NODELIST *nlp; 2829 emlxs_buf_t *sbp; 2830 2831 hba = rp->hba; 2832 ringno = rp->ringno; 2833 2834 if (lock) { 2835 mutex_enter(&EMLXS_RINGTX_LOCK); 2836 } 2837 2838 begin: 2839 2840 iocbq = NULL; 2841 2842 /* Check if a node needs servicing */ 2843 if (rp->nodeq.q_first) { 2844 nlp = (NODELIST *)rp->nodeq.q_first; 2845 2846 /* Get next iocb from node's priority queue */ 2847 2848 if (nlp->nlp_ptx[ringno].q_first) { 2849 iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first; 2850 2851 /* Check if this is last entry */ 2852 if (nlp->nlp_ptx[ringno].q_last == (void *)iocbq) { 2853 nlp->nlp_ptx[ringno].q_first = NULL; 2854 nlp->nlp_ptx[ringno].q_last = NULL; 2855 
nlp->nlp_ptx[ringno].q_cnt = 0; 2856 } else { 2857 /* Remove iocb from head */ 2858 nlp->nlp_ptx[ringno].q_first = 2859 (void *)iocbq->next; 2860 nlp->nlp_ptx[ringno].q_cnt--; 2861 } 2862 2863 iocbq->next = NULL; 2864 } 2865 2866 /* Get next iocb from node tx queue if node not closed */ 2867 else if (nlp->nlp_tx[ringno].q_first && 2868 !(nlp->nlp_flag[ringno] & NLP_CLOSED)) { 2869 iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first; 2870 2871 /* Check if this is last entry */ 2872 if (nlp->nlp_tx[ringno].q_last == (void *)iocbq) { 2873 nlp->nlp_tx[ringno].q_first = NULL; 2874 nlp->nlp_tx[ringno].q_last = NULL; 2875 nlp->nlp_tx[ringno].q_cnt = 0; 2876 } else { 2877 /* Remove iocb from head */ 2878 nlp->nlp_tx[ringno].q_first = 2879 (void *)iocbq->next; 2880 nlp->nlp_tx[ringno].q_cnt--; 2881 } 2882 2883 iocbq->next = NULL; 2884 } 2885 2886 /* Now deal with node itself */ 2887 2888 /* Check if node still needs servicing */ 2889 if ((nlp->nlp_ptx[ringno].q_first) || 2890 (nlp->nlp_tx[ringno].q_first && 2891 !(nlp->nlp_flag[ringno] & NLP_CLOSED))) { 2892 2893 /* 2894 * If this is the base node, then don't shift the 2895 * pointers. 
We want to drain the base node before 2896 * moving on 2897 */ 2898 if (!nlp->nlp_base) { 2899 /* 2900 * Just shift ring queue pointers to next 2901 * node 2902 */ 2903 rp->nodeq.q_last = (void *)nlp; 2904 rp->nodeq.q_first = nlp->nlp_next[ringno]; 2905 } 2906 } else { 2907 /* Remove node from ring queue */ 2908 2909 /* If this is the last node on list */ 2910 if (rp->nodeq.q_last == (void *)nlp) { 2911 rp->nodeq.q_last = NULL; 2912 rp->nodeq.q_first = NULL; 2913 rp->nodeq.q_cnt = 0; 2914 } else { 2915 /* Remove node from head */ 2916 rp->nodeq.q_first = nlp->nlp_next[ringno]; 2917 ((NODELIST *)rp->nodeq.q_last)-> 2918 nlp_next[ringno] = rp->nodeq.q_first; 2919 rp->nodeq.q_cnt--; 2920 2921 } 2922 2923 /* Clear node */ 2924 nlp->nlp_next[ringno] = NULL; 2925 } 2926 2927 /* 2928 * If no iocbq was found on this node, then it will have 2929 * been removed. So try again. 2930 */ 2931 if (!iocbq) { 2932 goto begin; 2933 } 2934 2935 sbp = (emlxs_buf_t *)iocbq->sbp; 2936 2937 if (sbp) { 2938 /* 2939 * Check flags before we enter mutex in case this 2940 * has been flushed and destroyed 2941 */ 2942 if ((sbp->pkt_flags & 2943 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) || 2944 !(sbp->pkt_flags & PACKET_IN_TXQ)) { 2945 goto begin; 2946 } 2947 2948 mutex_enter(&sbp->mtx); 2949 2950 if ((sbp->pkt_flags & 2951 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) || 2952 !(sbp->pkt_flags & PACKET_IN_TXQ)) { 2953 mutex_exit(&sbp->mtx); 2954 goto begin; 2955 } 2956 2957 sbp->pkt_flags &= ~PACKET_IN_TXQ; 2958 hba->ring_tx_count[ringno]--; 2959 2960 mutex_exit(&sbp->mtx); 2961 } 2962 } 2963 2964 if (iocbq) { 2965 HBASTATS.IocbTxGet[ringno]++; 2966 } 2967 2968 /* Adjust the ring timeout timer */ 2969 rp->timeout = (rp->nodeq.q_first) ? 
(hba->timer_tics + 5) : 0; 2970 2971 if (lock) { 2972 mutex_exit(&EMLXS_RINGTX_LOCK); 2973 } 2974 2975 return (iocbq); 2976 2977 } /* emlxs_tx_get() */ 2978 2979 2980 2981 extern uint32_t 2982 emlxs_chipq_node_flush(emlxs_port_t *port, RING *ring, NODELIST *ndlp, 2983 emlxs_buf_t *fpkt) 2984 { 2985 emlxs_hba_t *hba = HBA; 2986 emlxs_buf_t *sbp; 2987 IOCBQ *iocbq; 2988 IOCBQ *next; 2989 Q abort; 2990 RING *rp; 2991 uint32_t ringno; 2992 uint8_t flag[MAX_RINGS]; 2993 uint32_t iotag; 2994 2995 bzero((void *)&abort, sizeof (Q)); 2996 bzero((void *)flag, sizeof (flag)); 2997 2998 for (ringno = 0; ringno < hba->ring_count; ringno++) { 2999 rp = &hba->ring[ringno]; 3000 3001 if (ring && rp != ring) { 3002 continue; 3003 } 3004 3005 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 3006 3007 for (iotag = 1; iotag < rp->max_iotag; iotag++) { 3008 sbp = rp->fc_table[iotag]; 3009 3010 if (sbp && (sbp != STALE_PACKET) && 3011 (sbp->pkt_flags & PACKET_IN_CHIPQ) && 3012 (sbp->node == ndlp) && 3013 (sbp->ring == rp) && 3014 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) { 3015 emlxs_sbp_abort_add(port, sbp, &abort, flag, 3016 fpkt); 3017 } 3018 3019 } 3020 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3021 3022 } /* for */ 3023 3024 /* Now put the iocb's on the tx queue */ 3025 iocbq = (IOCBQ *)abort.q_first; 3026 while (iocbq) { 3027 /* Save the next iocbq for now */ 3028 next = (IOCBQ *)iocbq->next; 3029 3030 /* Unlink this iocbq */ 3031 iocbq->next = NULL; 3032 3033 /* Send this iocbq */ 3034 emlxs_tx_put(iocbq, 1); 3035 3036 iocbq = next; 3037 } 3038 3039 /* Now trigger ring service */ 3040 for (ringno = 0; ringno < hba->ring_count; ringno++) { 3041 if (!flag[ringno]) { 3042 continue; 3043 } 3044 3045 rp = &hba->ring[ringno]; 3046 3047 emlxs_sli_issue_iocb_cmd(hba, rp, 0); 3048 } 3049 3050 return (abort.q_cnt); 3051 3052 } /* emlxs_chipq_node_flush() */ 3053 3054 3055 /* Flush all IO's left on all iotag lists */ 3056 static uint32_t 3057 emlxs_iotag_flush(emlxs_hba_t *hba) 3058 { 3059 emlxs_port_t 
*port = &PPORT; 3060 emlxs_buf_t *sbp; 3061 IOCBQ *iocbq; 3062 IOCB *iocb; 3063 Q abort; 3064 RING *rp; 3065 uint32_t ringno; 3066 uint32_t iotag; 3067 uint32_t count; 3068 3069 count = 0; 3070 for (ringno = 0; ringno < hba->ring_count; ringno++) { 3071 rp = &hba->ring[ringno]; 3072 3073 bzero((void *)&abort, sizeof (Q)); 3074 3075 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 3076 3077 for (iotag = 1; iotag < rp->max_iotag; iotag++) { 3078 sbp = rp->fc_table[iotag]; 3079 3080 if (!sbp || (sbp == STALE_PACKET)) { 3081 continue; 3082 } 3083 3084 /* Unregister the packet */ 3085 rp->fc_table[iotag] = STALE_PACKET; 3086 hba->io_count[ringno]--; 3087 sbp->iotag = 0; 3088 3089 /* Clean up the sbp */ 3090 mutex_enter(&sbp->mtx); 3091 3092 /* Set IOCB status */ 3093 iocbq = &sbp->iocbq; 3094 iocb = &iocbq->iocb; 3095 3096 iocb->ulpStatus = IOSTAT_LOCAL_REJECT; 3097 iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN; 3098 iocb->ulpLe = 1; 3099 iocbq->next = NULL; 3100 3101 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3102 sbp->pkt_flags &= ~PACKET_IN_TXQ; 3103 hba->ring_tx_count[ringno]--; 3104 } 3105 3106 if (sbp->pkt_flags & PACKET_IN_CHIPQ) { 3107 sbp->pkt_flags &= ~PACKET_IN_CHIPQ; 3108 } 3109 3110 if (sbp->bmp) { 3111 (void) emlxs_mem_put(hba, MEM_BPL, 3112 (uint8_t *)sbp->bmp); 3113 sbp->bmp = 0; 3114 } 3115 3116 /* At this point all nodes are assumed destroyed */ 3117 sbp->node = 0; 3118 3119 mutex_exit(&sbp->mtx); 3120 3121 /* Add this iocb to our local abort Q */ 3122 if (abort.q_first) { 3123 ((IOCBQ *)abort.q_last)->next = iocbq; 3124 abort.q_last = (uint8_t *)iocbq; 3125 abort.q_cnt++; 3126 } else { 3127 abort.q_first = (uint8_t *)iocbq; 3128 abort.q_last = (uint8_t *)iocbq; 3129 abort.q_cnt = 1; 3130 } 3131 } 3132 3133 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3134 3135 /* Trigger deferred completion */ 3136 if (abort.q_first) { 3137 mutex_enter(&rp->rsp_lock); 3138 if (rp->rsp_head == NULL) { 3139 rp->rsp_head = (IOCBQ *)abort.q_first; 3140 rp->rsp_tail = (IOCBQ 
*)abort.q_last; 3141 } else { 3142 rp->rsp_tail->next = (IOCBQ *)abort.q_first; 3143 rp->rsp_tail = (IOCBQ *)abort.q_last; 3144 } 3145 mutex_exit(&rp->rsp_lock); 3146 3147 emlxs_thread_trigger2(&rp->intr_thread, 3148 emlxs_proc_ring, rp); 3149 3150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg, 3151 "Forced iotag completion. ring=%d count=%d", 3152 ringno, abort.q_cnt); 3153 3154 count += abort.q_cnt; 3155 } 3156 } 3157 3158 return (count); 3159 3160 } /* emlxs_iotag_flush() */ 3161 3162 3163 3164 /* Checks for IO's on all or a given ring for a given node */ 3165 extern uint32_t 3166 emlxs_chipq_node_check(emlxs_port_t *port, RING *ring, NODELIST *ndlp) 3167 { 3168 emlxs_hba_t *hba = HBA; 3169 emlxs_buf_t *sbp; 3170 RING *rp; 3171 uint32_t ringno; 3172 uint32_t count; 3173 uint32_t iotag; 3174 3175 count = 0; 3176 3177 for (ringno = 0; ringno < hba->ring_count; ringno++) { 3178 rp = &hba->ring[ringno]; 3179 3180 if (ring && rp != ring) { 3181 continue; 3182 } 3183 3184 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 3185 3186 for (iotag = 1; iotag < rp->max_iotag; iotag++) { 3187 sbp = rp->fc_table[iotag]; 3188 3189 if (sbp && (sbp != STALE_PACKET) && 3190 (sbp->pkt_flags & PACKET_IN_CHIPQ) && 3191 (sbp->node == ndlp) && 3192 (sbp->ring == rp) && 3193 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) { 3194 count++; 3195 } 3196 3197 } 3198 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3199 3200 } /* for */ 3201 3202 return (count); 3203 3204 } /* emlxs_chipq_node_check() */ 3205 3206 3207 3208 /* Flush all IO's for a given node's lun (FC_FCP_RING only) */ 3209 extern uint32_t 3210 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun, 3211 emlxs_buf_t *fpkt) 3212 { 3213 emlxs_hba_t *hba = HBA; 3214 emlxs_buf_t *sbp; 3215 RING *rp; 3216 IOCBQ *iocbq; 3217 IOCBQ *next; 3218 Q abort; 3219 uint32_t iotag; 3220 uint8_t flag[MAX_RINGS]; 3221 3222 bzero((void *)flag, sizeof (flag)); 3223 bzero((void *)&abort, sizeof (Q)); 3224 rp = &hba->ring[FC_FCP_RING]; 3225 3226 
mutex_enter(&EMLXS_FCTAB_LOCK(FC_FCP_RING)); 3227 for (iotag = 1; iotag < rp->max_iotag; iotag++) { 3228 sbp = rp->fc_table[iotag]; 3229 3230 if (sbp && (sbp != STALE_PACKET) && 3231 sbp->pkt_flags & PACKET_IN_CHIPQ && 3232 sbp->node == ndlp && 3233 sbp->ring == rp && 3234 sbp->lun == lun && 3235 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) { 3236 emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt); 3237 } 3238 } 3239 mutex_exit(&EMLXS_FCTAB_LOCK(FC_FCP_RING)); 3240 3241 /* Now put the iocb's on the tx queue */ 3242 iocbq = (IOCBQ *)abort.q_first; 3243 while (iocbq) { 3244 /* Save the next iocbq for now */ 3245 next = (IOCBQ *)iocbq->next; 3246 3247 /* Unlink this iocbq */ 3248 iocbq->next = NULL; 3249 3250 /* Send this iocbq */ 3251 emlxs_tx_put(iocbq, 1); 3252 3253 iocbq = next; 3254 } 3255 3256 /* Now trigger ring service */ 3257 if (abort.q_cnt) { 3258 emlxs_sli_issue_iocb_cmd(hba, rp, 0); 3259 } 3260 3261 return (abort.q_cnt); 3262 3263 } /* emlxs_chipq_lun_flush() */ 3264 3265 3266 3267 /* 3268 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued. 
3269 * This must be called while holding the EMLXS_FCCTAB_LOCK 3270 */ 3271 extern IOCBQ * 3272 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp, 3273 uint16_t iotag, RING *rp, uint8_t class, int32_t flag) 3274 { 3275 emlxs_hba_t *hba = HBA; 3276 IOCBQ *iocbq; 3277 IOCB *iocb; 3278 uint16_t abort_iotag; 3279 3280 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) { 3281 return (NULL); 3282 } 3283 3284 iocbq->ring = (void *)rp; 3285 iocbq->port = (void *)port; 3286 iocbq->node = (void *)ndlp; 3287 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL); 3288 iocb = &iocbq->iocb; 3289 3290 /* 3291 * set up an iotag using special Abort iotags 3292 */ 3293 if ((rp->fc_abort_iotag < rp->max_iotag)) { 3294 rp->fc_abort_iotag = rp->max_iotag; 3295 } 3296 3297 abort_iotag = rp->fc_abort_iotag++; 3298 3299 3300 iocb->ulpIoTag = abort_iotag; 3301 iocb->un.acxri.abortType = flag; 3302 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi; 3303 iocb->un.acxri.abortIoTag = iotag; 3304 iocb->ulpLe = 1; 3305 iocb->ulpClass = class; 3306 iocb->ulpCommand = CMD_ABORT_XRI_CN; 3307 iocb->ulpOwner = OWN_CHIP; 3308 3309 return (iocbq); 3310 3311 } /* emlxs_create_abort_xri_cn() */ 3312 3313 3314 extern IOCBQ * 3315 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid, 3316 RING *rp, uint8_t class, int32_t flag) 3317 { 3318 emlxs_hba_t *hba = HBA; 3319 IOCBQ *iocbq; 3320 IOCB *iocb; 3321 uint16_t abort_iotag; 3322 3323 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) { 3324 return (NULL); 3325 } 3326 3327 iocbq->ring = (void *)rp; 3328 iocbq->port = (void *)port; 3329 iocbq->node = (void *)ndlp; 3330 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL); 3331 iocb = &iocbq->iocb; 3332 3333 /* 3334 * set up an iotag using special Abort iotags 3335 */ 3336 if ((rp->fc_abort_iotag < rp->max_iotag)) { 3337 rp->fc_abort_iotag = rp->max_iotag; 3338 } 3339 3340 abort_iotag = rp->fc_abort_iotag++; 3341 3342 iocb->ulpContext = xid; 3343 iocb->ulpIoTag = 
abort_iotag; 3344 iocb->un.acxri.abortType = flag; 3345 iocb->ulpLe = 1; 3346 iocb->ulpClass = class; 3347 iocb->ulpCommand = CMD_ABORT_XRI_CX; 3348 iocb->ulpOwner = OWN_CHIP; 3349 3350 return (iocbq); 3351 3352 } /* emlxs_create_abort_xri_cx() */ 3353 3354 3355 3356 /* This must be called while holding the EMLXS_FCCTAB_LOCK */ 3357 extern IOCBQ * 3358 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp, 3359 uint16_t iotag, RING *rp) 3360 { 3361 emlxs_hba_t *hba = HBA; 3362 IOCBQ *iocbq; 3363 IOCB *iocb; 3364 uint16_t abort_iotag; 3365 3366 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) { 3367 return (NULL); 3368 } 3369 3370 iocbq->ring = (void *)rp; 3371 iocbq->port = (void *)port; 3372 iocbq->node = (void *)ndlp; 3373 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL); 3374 iocb = &iocbq->iocb; 3375 3376 /* 3377 * set up an iotag using special Abort iotags 3378 */ 3379 if ((rp->fc_abort_iotag < rp->max_iotag)) { 3380 rp->fc_abort_iotag = rp->max_iotag; 3381 } 3382 3383 abort_iotag = rp->fc_abort_iotag++; 3384 3385 iocb->ulpIoTag = abort_iotag; 3386 iocb->un.acxri.abortType = 0; 3387 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi; 3388 iocb->un.acxri.abortIoTag = iotag; 3389 iocb->ulpLe = 1; 3390 iocb->ulpClass = 0; 3391 iocb->ulpCommand = CMD_CLOSE_XRI_CN; 3392 iocb->ulpOwner = OWN_CHIP; 3393 3394 return (iocbq); 3395 3396 } /* emlxs_create_close_xri_cn() */ 3397 3398 3399 /* This must be called while holding the EMLXS_FCCTAB_LOCK */ 3400 extern IOCBQ * 3401 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid, 3402 RING *rp) 3403 { 3404 emlxs_hba_t *hba = HBA; 3405 IOCBQ *iocbq; 3406 IOCB *iocb; 3407 uint16_t abort_iotag; 3408 3409 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) { 3410 return (NULL); 3411 } 3412 3413 iocbq->ring = (void *)rp; 3414 iocbq->port = (void *)port; 3415 iocbq->node = (void *)ndlp; 3416 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL); 3417 iocb = &iocbq->iocb; 3418 3419 /* 3420 * set 
up an iotag using special Abort iotags 3421 */ 3422 if ((rp->fc_abort_iotag < rp->max_iotag)) { 3423 rp->fc_abort_iotag = rp->max_iotag; 3424 } 3425 3426 abort_iotag = rp->fc_abort_iotag++; 3427 3428 iocb->ulpContext = xid; 3429 iocb->ulpIoTag = abort_iotag; 3430 iocb->ulpLe = 1; 3431 iocb->ulpClass = 0; 3432 iocb->ulpCommand = CMD_CLOSE_XRI_CX; 3433 iocb->ulpOwner = OWN_CHIP; 3434 3435 return (iocbq); 3436 3437 } /* emlxs_create_close_xri_cx() */ 3438 3439 3440 void 3441 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid) 3442 { 3443 RING *rp; 3444 IOCBQ *iocbq; 3445 3446 rp = &hba->ring[FC_CT_RING]; 3447 3448 /* Create the abort IOCB */ 3449 if (hba->state >= FC_LINK_UP) { 3450 iocbq = 3451 emlxs_create_abort_xri_cx(port, NULL, rxid, rp, CLASS3, 3452 ABORT_TYPE_ABTS); 3453 } else { 3454 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, rp); 3455 } 3456 if (iocbq) { 3457 emlxs_sli_issue_iocb_cmd(hba, rp, iocbq); 3458 } 3459 } 3460 3461 3462 /* This must be called while holding the EMLXS_FCCTAB_LOCK */ 3463 static void 3464 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort, 3465 uint8_t *flag, emlxs_buf_t *fpkt) 3466 { 3467 emlxs_hba_t *hba = HBA; 3468 IOCBQ *iocbq; 3469 RING *rp; 3470 NODELIST *ndlp; 3471 3472 rp = (RING *)sbp->ring; 3473 ndlp = sbp->node; 3474 3475 /* Create the close XRI IOCB */ 3476 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, rp); 3477 3478 /* 3479 * Add this iocb to our local abort Q 3480 * This way we don't hold the CHIPQ lock too long 3481 */ 3482 if (iocbq) { 3483 if (abort->q_first) { 3484 ((IOCBQ *)abort->q_last)->next = iocbq; 3485 abort->q_last = (uint8_t *)iocbq; 3486 abort->q_cnt++; 3487 } else { 3488 abort->q_first = (uint8_t *)iocbq; 3489 abort->q_last = (uint8_t *)iocbq; 3490 abort->q_cnt = 1; 3491 } 3492 iocbq->next = NULL; 3493 } 3494 3495 /* set the flags */ 3496 mutex_enter(&sbp->mtx); 3497 3498 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED); 3499 sbp->ticks = 
hba->timer_tics + 10; 3500 sbp->abort_attempts++; 3501 3502 flag[rp->ringno] = 1; 3503 3504 /* 3505 * If the fpkt is already set, then we will leave it alone 3506 * This ensures that this pkt is only accounted for on one 3507 * fpkt->flush_count 3508 */ 3509 if (!sbp->fpkt && fpkt) { 3510 mutex_enter(&fpkt->mtx); 3511 sbp->fpkt = fpkt; 3512 fpkt->flush_count++; 3513 mutex_exit(&fpkt->mtx); 3514 } 3515 3516 mutex_exit(&sbp->mtx); 3517 3518 return; 3519 3520 } /* emlxs_sbp_abort_add() */ 3521