1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2010 QLogic Corporation */ 23 24 /* 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 26 */ 27 28 /* 29 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 30 * 31 * *********************************************************************** 32 * * ** 33 * * NOTICE ** 34 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION ** 35 * * ALL RIGHTS RESERVED ** 36 * * ** 37 * *********************************************************************** 38 * 39 */ 40 41 #include <ql_apps.h> 42 #include <ql_api.h> 43 #include <ql_debug.h> 44 #include <ql_iocb.h> 45 #include <ql_isr.h> 46 #include <ql_init.h> 47 #include <ql_mbx.h> 48 #include <ql_nx.h> 49 #include <ql_xioctl.h> 50 51 /* 52 * Local Function Prototypes. 
53 */ 54 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t, 55 uint32_t *); 56 static void ql_spurious_intr(ql_adapter_state_t *, int); 57 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *, 58 uint32_t *, int); 59 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *, 60 uint32_t *, uint32_t *, int); 61 static void ql_fast_fcp_post(ql_srb_t *); 62 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *, 63 uint32_t *, int); 64 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *, 65 uint32_t *, uint32_t *); 66 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *, 67 uint32_t *, uint32_t *); 68 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *, 69 ql_head_t *, uint32_t *, uint32_t *); 70 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *, 71 ql_head_t *, uint32_t *, uint32_t *); 72 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *, 73 ql_head_t *, uint32_t *, uint32_t *); 74 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *, 75 uint32_t *, uint32_t *); 76 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *, 77 ql_head_t *, uint32_t *, uint32_t *); 78 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *, 79 ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *); 80 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *, 81 ql_head_t *, uint32_t *, uint32_t *); 82 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *, 83 uint32_t *, uint32_t *); 84 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *, 85 ql_head_t *, uint32_t *, uint32_t *); 86 static void ql_els_passthru_entry(ql_adapter_state_t *, 87 els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *); 88 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *, 89 uint32_t *, uint32_t *); 90 
static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags); 91 92 /* 93 * Spurious interrupt counter 94 */ 95 uint32_t ql_spurious_cnt = 4; 96 uint32_t ql_max_intr_loop = 16; 97 98 /* 99 * ql_isr 100 * Process all INTX intr types. 101 * 102 * Input: 103 * arg1: adapter state pointer. 104 * 105 * Returns: 106 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED 107 * 108 * Context: 109 * Interrupt or Kernel context, no mailbox commands allowed. 110 */ 111 /* ARGSUSED */ 112 uint_t 113 ql_isr(caddr_t arg1) 114 { 115 return (ql_isr_aif(arg1, 0)); 116 } 117 118 /* 119 * ql_isr_default 120 * Process unknown/unvectored intr types 121 * 122 * Input: 123 * arg1: adapter state pointer. 124 * arg2: interrupt vector. 125 * 126 * Returns: 127 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED 128 * 129 * Context: 130 * Interrupt or Kernel context, no mailbox commands allowed. 131 */ 132 /* ARGSUSED */ 133 uint_t 134 ql_isr_default(caddr_t arg1, caddr_t arg2) 135 { 136 ql_adapter_state_t *ha = (void *)arg1; 137 138 EL(ha, "isr_default called: idx=%x\n", arg2); 139 return (ql_isr_aif(arg1, arg2)); 140 } 141 142 /* 143 * ql_isr_aif 144 * Process mailbox and I/O command completions. 145 * 146 * Input: 147 * arg: adapter state pointer. 148 * intvec: interrupt vector. 149 * 150 * Returns: 151 * DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED 152 * 153 * Context: 154 * Interrupt or Kernel context, no mailbox commands allowed. 
 */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;
	uint32_t		reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	/*
	 * intr_loop counts down per iteration; it is passed to the
	 * completion handlers as their intr_clr argument so that they
	 * clear the RISC interrupt early only for the first few passes.
	 * Once it reaches zero, this routine clears the interrupt itself.
	 */
	int			intr_loop = 4;
	boolean_t		clear_spurious = B_TRUE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	/* Hold off power-down while the ISR is active. */
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	if (CFG_IST(ha, CFG_CTRL_2200)) {
		/* ISP2200: poll istatus until the RISC interrupt deasserts. */
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			/* Special Fast Post 2200. */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t	*sp;

				mbx = RD16_IO_REG(ha, mailbox_out[23]);

				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			/* stat == 0 means the fast-post path did not run. */
			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/* Get mailbox data. */
					mbx = RD16_IO_REG(ha, mailbox_out[0]);
					if (mbx > 0x3fff && mbx < 0x8000) {
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious interrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/* Publish accumulated flags to the task daemon. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		/* All other ISP types: decode the risc2host status word. */
		uint32_t	ql_max_intr_loop_cnt = 0;

		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			intr_loop = 1;
		}
		/* Bound the loop with ql_max_intr_loop to avoid livelock. */
		while (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) &&
		    (++ql_max_intr_loop_cnt < ql_max_intr_loop)) {

			clear_spurious = B_TRUE;	/* assume ok */

			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;

			if (CFG_IST(ha, CFG_CTRL_8021) &&
			    (RD32_IO_REG(ha, nx_risc_int) == 0 ||
			    intr_loop == 0)) {
				break;
			}

			if (intr_loop) {
				intr_loop--;
			}

			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    ql_spurious_cnt) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
					clear_spurious = B_FALSE;
				} else {
					QL_PRINT_10(CE_CONT, "(%d): response "
					    "ring index same as before\n",
					    ha->instance);
					intr = B_TRUE;
					clear_spurious = B_FALSE;
				}
				break;

			/*
			 * The fast-post cases below rewrite the low word of
			 * stat into the equivalent async event code and let
			 * ql_async_event() do the completion work.
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, stat,
				    &set_flags);
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
				} else {
					WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
				}
			}

			/* Publish accumulated flags to the task daemon. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			/* Parity/pause set by ql_handle_uncommon_risc_intr. */
			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr);	/* PCI posting */
				break;
			}

			if (clear_spurious) {
				spurious_intr = 0;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	/* Complete queued commands outside the interrupt lock. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}

/*
 * ql_handle_uncommon_risc_intr
 *	Handle an uncommon RISC interrupt.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	stat:	interrupt status
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
    uint32_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	/* RISC paused or hccr error bits set: treat as parity/pause error. */
	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/* Warn only on the first occurrence or when the state changes. */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/* Capture firmware state before resetting the chip. */
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}

		if (ha->parity_pause_errors == 0) {
			ha->log_parity_pause = B_TRUE;
		}

		/* Saturating counter; never wraps back to zero. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		CFG_IST(ha, CFG_CTRL_8021) ? ql_8021_disable_intrs(ha) :
		    WRT16_IO_REG(ha, ictrl, 0);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}

/*
 * ql_spurious_intr
 *	Inform Solaris of spurious interrupts.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
{
	ddi_devstate_t	state;

	EL(ha, "Spurious interrupt\n");

	/* Disable ISP interrupts.
 */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Clear RISC interrupt */
	if (intr_clr) {
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Report a fault to the DDI framework only if still marked up. */
	state = ddi_get_devstate(ha->dip);
	if (state == DDI_DEVSTATE_UP) {
		/*EMPTY*/
		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
		    DDI_DEVICE_FAULT, "spurious interrupts");
	}
}

/*
 * ql_mbx_completion
 *	Processes mailbox completions.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	mb0:		Mailbox 0 contents.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt context.
 */
/* ARGSUSED */
static void
ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
    uint32_t *reset_flags, int intr_clr)
{
	uint32_t	index;
	uint16_t	cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Load return mailbox registers. */
	MBX_REGISTER_LOCK(ha);

	if (ha->mcp != NULL) {
		ha->mcp->mb[0] = mb0;
		/*
		 * in_mb is a bitmask of the mailbox registers the pending
		 * command expects back; walk it, reading only those.
		 */
		index = ha->mcp->in_mb & ~MBX_0;

		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
			index >>= 1;
			if (index & MBX_0) {
				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
				    mailbox_out[cnt]);
			}
		}

	} else {
		/* No command is waiting for this completion. */
		EL(ha, "mcp == NULL\n");
	}

	if (intr_clr) {
		/* Clear RISC interrupt.
 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Wake the thread waiting on this mailbox command, if any. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
	if (ha->flags & INTERRUPTS_ENABLED) {
		cv_broadcast(&ha->cv_mbx_intr);
	}

	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_async_event
 *	Processes asynchronous events.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	mbx:		Mailbox 0 register.
 *	done_q:		head pointer to done queue.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
{
	uint32_t		handle;
	uint32_t		index;
	uint16_t		cnt;
	uint16_t		mb[MAX_MBOX_COUNT];
	ql_srb_t		*sp;
	port_id_t		s_id;
	ql_tgt_t		*tq;
	boolean_t		intr = B_TRUE;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Setup to process fast completion. */
	mb[0] = LSW(mbx);
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
		    RD16_IO_REG(ha, mailbox_out[2]));
		break;

	/* 16-bit fast post: the handle rides in the upper word of mbx. */
	case MBA_CMPLT_1_16BIT:
		handle = MSW(mbx);
		mb[0] = MBA_SCSI_COMPLETION;
		break;

	case MBA_CMPLT_1_32BIT:
		handle = SHORT_TO_LONG(MSW(mbx),
		    RD16_IO_REG(ha, mailbox_out[2]));
		mb[0] = MBA_SCSI_COMPLETION;
		break;

	case MBA_CTIO_COMPLETION:
	case MBA_IP_COMPLETION:
		handle = CFG_IST(ha, CFG_CTRL_2200) ?
SHORT_TO_LONG( 774 RD16_IO_REG(ha, mailbox_out[1]), 775 RD16_IO_REG(ha, mailbox_out[2])) : 776 SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2])); 777 mb[0] = MBA_SCSI_COMPLETION; 778 break; 779 780 default: 781 break; 782 } 783 784 /* Handle asynchronous event */ 785 switch (mb[0]) { 786 case MBA_SCSI_COMPLETION: 787 QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n", 788 ha->instance); 789 790 if (intr_clr) { 791 /* Clear RISC interrupt */ 792 if (CFG_IST(ha, CFG_CTRL_8021)) { 793 ql_8021_clr_fw_intr(ha); 794 } else if (CFG_IST(ha, CFG_CTRL_242581)) { 795 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT); 796 } else { 797 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT); 798 } 799 intr = B_FALSE; 800 } 801 802 if ((ha->flags & ONLINE) == 0) { 803 break; 804 } 805 806 /* Get handle. */ 807 index = handle & OSC_INDEX_MASK; 808 809 /* Validate handle. */ 810 sp = index < MAX_OUTSTANDING_COMMANDS ? 811 ha->outstanding_cmds[index] : NULL; 812 813 if (sp != NULL && sp->handle == handle) { 814 ha->outstanding_cmds[index] = NULL; 815 sp->handle = 0; 816 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 817 818 /* Set completed status. 
*/ 819 sp->flags |= SRB_ISP_COMPLETED; 820 821 /* Set completion status */ 822 sp->pkt->pkt_reason = CS_COMPLETE; 823 824 if (!(sp->flags & SRB_FCP_CMD_PKT)) { 825 /* Place block on done queue */ 826 ql_add_link_b(done_q, &sp->cmd); 827 } else { 828 ql_fast_fcp_post(sp); 829 } 830 } else if (handle != QL_FCA_BRAND) { 831 if (sp == NULL) { 832 EL(ha, "%xh unknown IOCB handle=%xh\n", 833 mb[0], handle); 834 } else { 835 EL(ha, "%xh mismatch IOCB handle pkt=%xh, " 836 "sp=%xh\n", mb[0], handle, sp->handle); 837 } 838 839 EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh," 840 "mbx6=%xh, mbx7=%xh\n", mb[0], 841 RD16_IO_REG(ha, mailbox_out[1]), 842 RD16_IO_REG(ha, mailbox_out[2]), 843 RD16_IO_REG(ha, mailbox_out[3]), 844 RD16_IO_REG(ha, mailbox_out[6]), 845 RD16_IO_REG(ha, mailbox_out[7])); 846 847 (void) ql_binary_fw_dump(ha, FALSE); 848 849 if (!(ha->task_daemon_flags & 850 (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) { 851 EL(ha, "%xh ISP Invalid handle, " 852 "isp_abort_needed\n", mb[0]); 853 *set_flags |= ISP_ABORT_NEEDED; 854 } 855 } 856 break; 857 858 case MBA_RESET: /* Reset */ 859 EL(ha, "%xh Reset received\n", mb[0]); 860 *set_flags |= RESET_MARKER_NEEDED; 861 break; 862 863 case MBA_SYSTEM_ERR: /* System Error */ 864 mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 865 mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 866 mb[3] = RD16_IO_REG(ha, mailbox_out[3]); 867 mb[7] = RD16_IO_REG(ha, mailbox_out[7]); 868 869 EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, " 870 "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n " 871 "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, " 872 "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3], 873 RD16_IO_REG(ha, mailbox_out[4]), 874 RD16_IO_REG(ha, mailbox_out[5]), 875 RD16_IO_REG(ha, mailbox_out[6]), mb[7], 876 RD16_IO_REG(ha, mailbox_out[8]), 877 RD16_IO_REG(ha, mailbox_out[9]), 878 RD16_IO_REG(ha, mailbox_out[10]), 879 RD16_IO_REG(ha, mailbox_out[11]), 880 RD16_IO_REG(ha, mailbox_out[12])); 881 882 EL(ha, "%xh ISP System Error, 
isp_abort_needed\n mbx13=%xh, " 883 "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n" 884 "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n", 885 mb[0], RD16_IO_REG(ha, mailbox_out[13]), 886 RD16_IO_REG(ha, mailbox_out[14]), 887 RD16_IO_REG(ha, mailbox_out[15]), 888 RD16_IO_REG(ha, mailbox_out[16]), 889 RD16_IO_REG(ha, mailbox_out[17]), 890 RD16_IO_REG(ha, mailbox_out[18]), 891 RD16_IO_REG(ha, mailbox_out[19]), 892 RD16_IO_REG(ha, mailbox_out[20]), 893 RD16_IO_REG(ha, mailbox_out[21]), 894 RD16_IO_REG(ha, mailbox_out[22]), 895 RD16_IO_REG(ha, mailbox_out[23])); 896 897 if (ha->reg_off->mbox_cnt > 24) { 898 EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, " 899 "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, " 900 "mbx30=%xh, mbx31=%xh\n", mb[0], 901 RD16_IO_REG(ha, mailbox_out[24]), 902 RD16_IO_REG(ha, mailbox_out[25]), 903 RD16_IO_REG(ha, mailbox_out[26]), 904 RD16_IO_REG(ha, mailbox_out[27]), 905 RD16_IO_REG(ha, mailbox_out[28]), 906 RD16_IO_REG(ha, mailbox_out[29]), 907 RD16_IO_REG(ha, mailbox_out[30]), 908 RD16_IO_REG(ha, mailbox_out[31])); 909 } 910 911 (void) ql_binary_fw_dump(ha, FALSE); 912 913 (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1], 914 mb[2], mb[3]); 915 916 if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) { 917 ADAPTER_STATE_LOCK(ha); 918 ha->flags |= MPI_RESET_NEEDED; 919 ADAPTER_STATE_UNLOCK(ha); 920 } 921 922 *set_flags |= ISP_ABORT_NEEDED; 923 ha->xioctl->ControllerErrorCount++; 924 break; 925 926 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 927 EL(ha, "%xh Request Transfer Error received, " 928 "isp_abort_needed\n", mb[0]); 929 930 (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003, 931 RD16_IO_REG(ha, mailbox_out[1]), 932 RD16_IO_REG(ha, mailbox_out[2]), 933 RD16_IO_REG(ha, mailbox_out[3])); 934 935 *set_flags |= ISP_ABORT_NEEDED; 936 ha->xioctl->ControllerErrorCount++; 937 break; 938 939 case MBA_RSP_TRANSFER_ERR: /* Response Xfer Err */ 940 EL(ha, "%xh Response Transfer Error received," 941 " 
isp_abort_needed\n", mb[0]); 942 943 (void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004, 944 RD16_IO_REG(ha, mailbox_out[1]), 945 RD16_IO_REG(ha, mailbox_out[2]), 946 RD16_IO_REG(ha, mailbox_out[3])); 947 948 *set_flags |= ISP_ABORT_NEEDED; 949 ha->xioctl->ControllerErrorCount++; 950 break; 951 952 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 953 EL(ha, "%xh Request Queue Wake-up received\n", 954 mb[0]); 955 break; 956 957 case MBA_MENLO_ALERT: /* Menlo Alert Notification */ 958 mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 959 mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 960 mb[3] = RD16_IO_REG(ha, mailbox_out[3]); 961 962 EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh," 963 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]); 964 965 switch (mb[1]) { 966 case MLA_LOGIN_OPERATIONAL_FW: 967 ADAPTER_STATE_LOCK(ha); 968 ha->flags |= MENLO_LOGIN_OPERATIONAL; 969 ADAPTER_STATE_UNLOCK(ha); 970 break; 971 case MLA_PANIC_RECOVERY: 972 case MLA_LOGIN_DIAGNOSTIC_FW: 973 case MLA_LOGIN_GOLDEN_FW: 974 case MLA_REJECT_RESPONSE: 975 default: 976 break; 977 } 978 break; 979 980 case MBA_LIP_F8: /* Received a LIP F8. */ 981 case MBA_LIP_RESET: /* LIP reset occurred. */ 982 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 983 if (CFG_IST(ha, CFG_CTRL_8081)) { 984 EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh" 985 "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]), 986 RD16_IO_REG(ha, mailbox_out[2])); 987 } else { 988 EL(ha, "%xh LIP received\n", mb[0]); 989 } 990 991 ADAPTER_STATE_LOCK(ha); 992 ha->flags &= ~POINT_TO_POINT; 993 ADAPTER_STATE_UNLOCK(ha); 994 995 if (!(ha->task_daemon_flags & LOOP_DOWN)) { 996 *set_flags |= LOOP_DOWN; 997 } 998 ql_port_state(ha, FC_STATE_OFFLINE, 999 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN); 1000 1001 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) { 1002 ha->loop_down_timer = LOOP_DOWN_TIMER_START; 1003 } 1004 1005 ha->adapter_stats->lip_count++; 1006 1007 /* Update AEN queue. 
*/ 1008 ha->xioctl->TotalLipResets++; 1009 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1010 ql_enqueue_aen(ha, mb[0], NULL); 1011 } 1012 break; 1013 1014 case MBA_LOOP_UP: 1015 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 | 1016 CFG_CTRL_24258081))) { 1017 ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]); 1018 if (ha->iidma_rate == IIDMA_RATE_1GB) { 1019 ha->state = FC_PORT_STATE_MASK( 1020 ha->state) | FC_STATE_1GBIT_SPEED; 1021 index = 1; 1022 } else if (ha->iidma_rate == IIDMA_RATE_2GB) { 1023 ha->state = FC_PORT_STATE_MASK( 1024 ha->state) | FC_STATE_2GBIT_SPEED; 1025 index = 2; 1026 } else if (ha->iidma_rate == IIDMA_RATE_4GB) { 1027 ha->state = FC_PORT_STATE_MASK( 1028 ha->state) | FC_STATE_4GBIT_SPEED; 1029 index = 4; 1030 } else if (ha->iidma_rate == IIDMA_RATE_8GB) { 1031 ha->state = FC_PORT_STATE_MASK( 1032 ha->state) | FC_STATE_8GBIT_SPEED; 1033 index = 8; 1034 } else if (ha->iidma_rate == IIDMA_RATE_10GB) { 1035 ha->state = FC_PORT_STATE_MASK( 1036 ha->state) | FC_STATE_10GBIT_SPEED; 1037 index = 10; 1038 } else { 1039 ha->state = FC_PORT_STATE_MASK( 1040 ha->state); 1041 index = 0; 1042 } 1043 } else { 1044 ha->iidma_rate = IIDMA_RATE_1GB; 1045 ha->state = FC_PORT_STATE_MASK(ha->state) | 1046 FC_STATE_FULL_SPEED; 1047 index = 1; 1048 } 1049 1050 for (vha = ha; vha != NULL; vha = vha->vp_next) { 1051 vha->state = FC_PORT_STATE_MASK(vha->state) | 1052 FC_PORT_SPEED_MASK(ha->state); 1053 } 1054 EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]); 1055 1056 /* Update AEN queue. 
*/ 1057 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1058 ql_enqueue_aen(ha, mb[0], NULL); 1059 } 1060 break; 1061 1062 case MBA_LOOP_DOWN: 1063 EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, " 1064 "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]), 1065 RD16_IO_REG(ha, mailbox_out[2]), 1066 RD16_IO_REG(ha, mailbox_out[3]), 1067 RD16_IO_REG(ha, mailbox_out[4])); 1068 1069 if (!(ha->task_daemon_flags & LOOP_DOWN)) { 1070 *set_flags |= LOOP_DOWN; 1071 } 1072 ql_port_state(ha, FC_STATE_OFFLINE, 1073 FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN); 1074 1075 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) { 1076 ha->loop_down_timer = LOOP_DOWN_TIMER_START; 1077 } 1078 1079 if (CFG_IST(ha, CFG_CTRL_258081)) { 1080 ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]); 1081 } 1082 1083 /* Update AEN queue. */ 1084 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1085 ql_enqueue_aen(ha, mb[0], NULL); 1086 } 1087 break; 1088 1089 case MBA_PORT_UPDATE: 1090 mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 1091 mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 1092 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ? 1093 RD16_IO_REG(ha, mailbox_out[3]) : 0); 1094 1095 /* Locate port state structure. 
*/ 1096 for (vha = ha; vha != NULL; vha = vha->vp_next) { 1097 if (vha->vp_index == LSB(mb[3])) { 1098 break; 1099 } 1100 } 1101 if (vha == NULL) { 1102 break; 1103 } 1104 1105 if (CFG_IST(ha, CFG_CTRL_8081) && mb[1] == 0xffff && 1106 mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a || 1107 MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d || 1108 MSB(mb[3]) == 0x1e)) { 1109 /* 1110 * received FLOGI reject 1111 * received FLOGO 1112 * FCF configuration changed 1113 * FIP Clear Virtual Link received 1114 * FKA timeout 1115 */ 1116 if (!(ha->task_daemon_flags & LOOP_DOWN)) { 1117 *set_flags |= LOOP_DOWN; 1118 } 1119 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE | 1120 COMMAND_WAIT_NEEDED | LOOP_DOWN); 1121 if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) { 1122 ha->loop_down_timer = LOOP_DOWN_TIMER_START; 1123 } 1124 /* 1125 * In N port 2 N port topology the FW provides a port 1126 * database entry at loop_id 0x7fe which we use to 1127 * acquire the Ports WWPN. 1128 */ 1129 } else if ((mb[1] != 0x7fe) && 1130 ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE || 1131 (CFG_IST(ha, CFG_CTRL_24258081) && 1132 (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) { 1133 EL(ha, "%xh Port Database Update, Login/Logout " 1134 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n", 1135 mb[0], mb[1], mb[2], mb[3]); 1136 } else { 1137 EL(ha, "%xh Port Database Update received, mbx1=%xh," 1138 " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], 1139 mb[3]); 1140 *set_flags |= LOOP_RESYNC_NEEDED; 1141 *set_flags &= ~LOOP_DOWN; 1142 *reset_flags |= LOOP_DOWN; 1143 *reset_flags &= ~LOOP_RESYNC_NEEDED; 1144 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF; 1145 TASK_DAEMON_LOCK(ha); 1146 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED; 1147 vha->task_daemon_flags &= ~LOOP_DOWN; 1148 TASK_DAEMON_UNLOCK(ha); 1149 ADAPTER_STATE_LOCK(ha); 1150 vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO; 1151 ADAPTER_STATE_UNLOCK(ha); 1152 } 1153 1154 /* Update AEN queue. 
*/ 1155 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1156 ql_enqueue_aen(ha, mb[0], NULL); 1157 } 1158 break; 1159 1160 case MBA_RSCN_UPDATE: 1161 mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 1162 mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 1163 mb[3] = (uint16_t)(ha->flags & VP_ENABLED ? 1164 RD16_IO_REG(ha, mailbox_out[3]) : 0); 1165 1166 /* Locate port state structure. */ 1167 for (vha = ha; vha != NULL; vha = vha->vp_next) { 1168 if (vha->vp_index == LSB(mb[3])) { 1169 break; 1170 } 1171 } 1172 1173 if (vha == NULL) { 1174 break; 1175 } 1176 1177 if (LSB(mb[1]) == vha->d_id.b.domain && 1178 MSB(mb[2]) == vha->d_id.b.area && 1179 LSB(mb[2]) == vha->d_id.b.al_pa) { 1180 EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, " 1181 "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]); 1182 } else { 1183 EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, " 1184 "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]); 1185 if (FC_PORT_STATE_MASK(vha->state) != 1186 FC_STATE_OFFLINE) { 1187 ql_rcv_rscn_els(vha, &mb[0], done_q); 1188 TASK_DAEMON_LOCK(ha); 1189 vha->task_daemon_flags |= RSCN_UPDATE_NEEDED; 1190 TASK_DAEMON_UNLOCK(ha); 1191 *set_flags |= RSCN_UPDATE_NEEDED; 1192 } 1193 } 1194 1195 /* Update AEN queue. */ 1196 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1197 ql_enqueue_aen(ha, mb[0], NULL); 1198 } 1199 break; 1200 1201 case MBA_LIP_ERROR: /* Loop initialization errors. */ 1202 EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0], 1203 RD16_IO_REG(ha, mailbox_out[1])); 1204 break; 1205 1206 case MBA_IP_RECEIVE: 1207 case MBA_IP_BROADCAST: 1208 mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 1209 mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 1210 mb[3] = RD16_IO_REG(ha, mailbox_out[3]); 1211 1212 EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, " 1213 "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]); 1214 1215 /* Locate device queue. 
*/ 1216 s_id.b.al_pa = LSB(mb[2]); 1217 s_id.b.area = MSB(mb[2]); 1218 s_id.b.domain = LSB(mb[1]); 1219 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) { 1220 EL(ha, "Unknown IP device=%xh\n", s_id.b24); 1221 break; 1222 } 1223 1224 cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ? 1225 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0], 1226 ha->ip_init_ctrl_blk.cb24.buf_size[1]) : 1227 CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0], 1228 ha->ip_init_ctrl_blk.cb.buf_size[1])); 1229 1230 tq->ub_sequence_length = mb[3]; 1231 tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt); 1232 if (mb[3] % cnt) { 1233 tq->ub_total_seg_cnt++; 1234 } 1235 cnt = (uint16_t)(tq->ub_total_seg_cnt + 10); 1236 1237 for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt; 1238 index++) { 1239 mb[index] = RD16_IO_REG(ha, mailbox_out[index]); 1240 } 1241 1242 tq->ub_seq_id = ++ha->ub_seq_id; 1243 tq->ub_seq_cnt = 0; 1244 tq->ub_frame_ro = 0; 1245 tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ? 1246 (CFG_IST(ha, CFG_CTRL_24258081) ? 
BROADCAST_24XX_HDL : 1247 IP_BROADCAST_LOOP_ID) : tq->loop_id); 1248 ha->rcv_dev_q = tq; 1249 1250 for (cnt = 10; cnt < ha->reg_off->mbox_cnt && 1251 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) { 1252 if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) != 1253 QL_SUCCESS) { 1254 EL(ha, "ql_ub_frame_hdr failed, " 1255 "isp_abort_needed\n"); 1256 *set_flags |= ISP_ABORT_NEEDED; 1257 break; 1258 } 1259 } 1260 break; 1261 1262 case MBA_IP_LOW_WATER_MARK: 1263 case MBA_IP_RCV_BUFFER_EMPTY: 1264 EL(ha, "%xh IP low water mark / RCV buffer empty received\n", 1265 mb[0]); 1266 *set_flags |= NEED_UNSOLICITED_BUFFERS; 1267 break; 1268 1269 case MBA_IP_HDR_DATA_SPLIT: 1270 EL(ha, "%xh IP HDR data split received\n", mb[0]); 1271 break; 1272 1273 case MBA_ERROR_LOGGING_DISABLED: 1274 EL(ha, "%xh error logging disabled received, " 1275 "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1])); 1276 break; 1277 1278 case MBA_POINT_TO_POINT: 1279 /* case MBA_DCBX_COMPLETED: */ 1280 if (CFG_IST(ha, CFG_CTRL_8081)) { 1281 EL(ha, "%xh DCBX completed received\n", mb[0]); 1282 } else { 1283 EL(ha, "%xh Point to Point Mode received\n", mb[0]); 1284 } 1285 ADAPTER_STATE_LOCK(ha); 1286 ha->flags |= POINT_TO_POINT; 1287 ADAPTER_STATE_UNLOCK(ha); 1288 break; 1289 1290 case MBA_FCF_CONFIG_ERROR: 1291 EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n", 1292 mb[0], RD16_IO_REG(ha, mailbox_out[1])); 1293 break; 1294 1295 case MBA_DCBX_PARAM_CHANGED: 1296 EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n", 1297 mb[0], RD16_IO_REG(ha, mailbox_out[1])); 1298 break; 1299 1300 case MBA_CHG_IN_CONNECTION: 1301 mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 1302 if (mb[1] == 2) { 1303 EL(ha, "%xh Change In Connection received, " 1304 "mbx1=%xh\n", mb[0], mb[1]); 1305 ADAPTER_STATE_LOCK(ha); 1306 ha->flags &= ~POINT_TO_POINT; 1307 ADAPTER_STATE_UNLOCK(ha); 1308 if (ha->topology & QL_N_PORT) { 1309 ha->topology = (uint8_t)(ha->topology & 1310 ~QL_N_PORT); 1311 ha->topology = (uint8_t)(ha->topology | 1312 
QL_NL_PORT); 1313 } 1314 } else { 1315 EL(ha, "%xh Change In Connection received, " 1316 "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]); 1317 *set_flags |= ISP_ABORT_NEEDED; 1318 } 1319 break; 1320 1321 case MBA_ZIO_UPDATE: 1322 EL(ha, "%xh ZIO response received\n", mb[0]); 1323 1324 ha->isp_rsp_index = RD16_IO_REG(ha, resp_in); 1325 ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr); 1326 intr = B_FALSE; 1327 break; 1328 1329 case MBA_PORT_BYPASS_CHANGED: 1330 EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n", 1331 mb[0], RD16_IO_REG(ha, mailbox_out[1])); 1332 /* 1333 * Event generated when there is a transition on 1334 * port bypass of crystal+. 1335 * Mailbox 1: Bit 0 - External. 1336 * Bit 2 - Internal. 1337 * When the bit is 0, the port is bypassed. 1338 * 1339 * For now we will generate a LIP for all cases. 1340 */ 1341 *set_flags |= HANDLE_PORT_BYPASS_CHANGE; 1342 break; 1343 1344 case MBA_RECEIVE_ERROR: 1345 EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n", 1346 mb[0], RD16_IO_REG(ha, mailbox_out[1]), 1347 RD16_IO_REG(ha, mailbox_out[2])); 1348 break; 1349 1350 case MBA_LS_RJT_SENT: 1351 EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0], 1352 RD16_IO_REG(ha, mailbox_out[1])); 1353 break; 1354 1355 case MBA_FW_RESTART_COMP: 1356 EL(ha, "%xh firmware restart complete received mb1=%xh\n", 1357 mb[0], RD16_IO_REG(ha, mailbox_out[1])); 1358 break; 1359 1360 /* 1361 * MBA_IDC_COMPLETE & MBA_IDC_NOTIFICATION: We won't get another 1362 * IDC async event until we ACK the current one. 
1363 */ 1364 case MBA_IDC_COMPLETE: 1365 ha->idc_mb[0] = mb[0]; 1366 ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 1367 ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 1368 ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]); 1369 ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]); 1370 ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]); 1371 ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]); 1372 ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]); 1373 EL(ha, "%xh Inter-driver communication complete received, " 1374 " mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh," 1375 " mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1], 1376 ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5], 1377 ha->idc_mb[6], ha->idc_mb[7]); 1378 *set_flags |= IDC_EVENT; 1379 break; 1380 1381 case MBA_IDC_NOTIFICATION: 1382 ha->idc_mb[0] = mb[0]; 1383 ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 1384 ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 1385 ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]); 1386 ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]); 1387 ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]); 1388 ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]); 1389 ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]); 1390 EL(ha, "%xh Inter-driver communication request notification " 1391 "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, " 1392 "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1], 1393 ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5], 1394 ha->idc_mb[6], ha->idc_mb[7]); 1395 *set_flags |= IDC_EVENT; 1396 break; 1397 1398 case MBA_IDC_TIME_EXTENDED: 1399 EL(ha, "%xh Inter-driver communication time extended received," 1400 " mbx1=%xh, mbx2=%xh\n", mb[0], 1401 RD16_IO_REG(ha, mailbox_out[1]), 1402 RD16_IO_REG(ha, mailbox_out[2])); 1403 break; 1404 1405 default: 1406 EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, " 1407 "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]), 1408 RD16_IO_REG(ha, mailbox_out[2]), 1409 RD16_IO_REG(ha, mailbox_out[3])); 1410 break; 1411 
	}

	/* Clear RISC interrupt */
	if (intr && intr_clr) {
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_fast_fcp_post
 *	Fast path for good SCSI I/O completion.
 *
 * Input:
 *	sp:	SRB pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_fast_fcp_post(ql_srb_t *sp)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		if (lq->lun_outcnt != 0) {
			lq->lun_outcnt--;
		}
	}

	/* Reset port down retry count on good completion. */
	tq->port_down_retry_count = ha->port_down_retry_count;
	tq->qfull_retry_count = ha->qfull_retry_count;
	ha->pha->timeout_cnt = 0;

	/* Remove command from watchdog queue. */
	if (sp->flags & SRB_WATCHDOG_ENABLED) {
		ql_remove_link(&tq->wdg, &sp->wdg);
		sp->flags &= ~SRB_WATCHDOG_ENABLED;
	}

	/*
	 * NOTE(review): only the else-branch releases the device queue lock
	 * here; the ql_next() path presumably drops it internally — confirm
	 * against ql_next()'s locking contract before changing this shape.
	 */
	if (lq->cmd.first != NULL) {
		ql_next(ha, lq);
	} else {
		/* Release LU queue specific lock. */
		DEVICE_QUEUE_UNLOCK(tq);
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
		}
	}

	/* Sync buffers if required. */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	/*
	 * Now call the pkt completion callback.  The interrupt lock is
	 * dropped around the callback so the upper layer may re-enter
	 * the driver.
	 */
	if (sp->flags & SRB_POLL) {
		sp->flags &= ~SRB_POLL;
	} else if (sp->pkt->pkt_comp) {
		INTR_UNLOCK(ha);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INTR_LOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_response_pkt
 *	Processes response entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	done_q:		head pointer to done queue.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
1526 */ 1527 static void 1528 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags, 1529 uint32_t *reset_flags, int intr_clr) 1530 { 1531 response_t *pkt; 1532 uint32_t dma_sync_size_1 = 0; 1533 uint32_t dma_sync_size_2 = 0; 1534 int status = 0; 1535 1536 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1537 1538 /* Clear RISC interrupt */ 1539 if (intr_clr) { 1540 if (CFG_IST(ha, CFG_CTRL_8021)) { 1541 ql_8021_clr_fw_intr(ha); 1542 } else if (CFG_IST(ha, CFG_CTRL_242581)) { 1543 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT); 1544 } else { 1545 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT); 1546 } 1547 } 1548 1549 if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) { 1550 EL(ha, "index error = %xh, isp_abort_needed", 1551 ha->isp_rsp_index); 1552 *set_flags |= ISP_ABORT_NEEDED; 1553 return; 1554 } 1555 1556 if ((ha->flags & ONLINE) == 0) { 1557 QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance); 1558 return; 1559 } 1560 1561 /* Calculate size of response queue entries to sync. */ 1562 if (ha->isp_rsp_index > ha->rsp_ring_index) { 1563 dma_sync_size_1 = (uint32_t) 1564 ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) * 1565 RESPONSE_ENTRY_SIZE); 1566 } else if (ha->isp_rsp_index == 0) { 1567 dma_sync_size_1 = (uint32_t) 1568 ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) * 1569 RESPONSE_ENTRY_SIZE); 1570 } else { 1571 /* Responses wrap around the Q */ 1572 dma_sync_size_1 = (uint32_t) 1573 ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) * 1574 RESPONSE_ENTRY_SIZE); 1575 dma_sync_size_2 = (uint32_t) 1576 (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE); 1577 } 1578 1579 /* Sync DMA buffer. 
*/ 1580 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 1581 (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE + 1582 RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1, 1583 DDI_DMA_SYNC_FORKERNEL); 1584 if (dma_sync_size_2) { 1585 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 1586 RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2, 1587 DDI_DMA_SYNC_FORKERNEL); 1588 } 1589 1590 while (ha->rsp_ring_index != ha->isp_rsp_index) { 1591 pkt = ha->response_ring_ptr; 1592 1593 QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n", 1594 ha->instance, ha->rsp_ring_index, ha->isp_rsp_index); 1595 QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8, 1596 RESPONSE_ENTRY_SIZE); 1597 1598 /* Adjust ring index. */ 1599 ha->rsp_ring_index++; 1600 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { 1601 ha->rsp_ring_index = 0; 1602 ha->response_ring_ptr = ha->response_ring_bp; 1603 } else { 1604 ha->response_ring_ptr++; 1605 } 1606 1607 /* Process packet. */ 1608 if (ha->status_srb != NULL && pkt->entry_type != 1609 STATUS_CONT_TYPE) { 1610 ql_add_link_b(done_q, &ha->status_srb->cmd); 1611 ha->status_srb = NULL; 1612 } 1613 1614 pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_24258081) ? 1615 pkt->entry_status & 0x3c : pkt->entry_status & 0x7e); 1616 1617 if (pkt->entry_status != 0) { 1618 ql_error_entry(ha, pkt, done_q, set_flags, 1619 reset_flags); 1620 } else { 1621 switch (pkt->entry_type) { 1622 case STATUS_TYPE: 1623 status |= CFG_IST(ha, CFG_CTRL_24258081) ? 
1624 ql_24xx_status_entry(ha, 1625 (sts_24xx_entry_t *)pkt, done_q, set_flags, 1626 reset_flags) : 1627 ql_status_entry(ha, (sts_entry_t *)pkt, 1628 done_q, set_flags, reset_flags); 1629 break; 1630 case STATUS_CONT_TYPE: 1631 ql_status_cont_entry(ha, 1632 (sts_cont_entry_t *)pkt, done_q, set_flags, 1633 reset_flags); 1634 break; 1635 case IP_TYPE: 1636 case IP_A64_TYPE: 1637 case IP_CMD_TYPE: 1638 ql_ip_entry(ha, (ip_entry_t *)pkt, done_q, 1639 set_flags, reset_flags); 1640 break; 1641 case IP_RECEIVE_TYPE: 1642 ql_ip_rcv_entry(ha, 1643 (ip_rcv_entry_t *)pkt, done_q, set_flags, 1644 reset_flags); 1645 break; 1646 case IP_RECEIVE_CONT_TYPE: 1647 ql_ip_rcv_cont_entry(ha, 1648 (ip_rcv_cont_entry_t *)pkt, done_q, 1649 set_flags, reset_flags); 1650 break; 1651 case IP_24XX_RECEIVE_TYPE: 1652 ql_ip_24xx_rcv_entry(ha, 1653 (ip_rcv_24xx_entry_t *)pkt, done_q, 1654 set_flags, reset_flags); 1655 break; 1656 case MS_TYPE: 1657 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q, 1658 set_flags, reset_flags); 1659 break; 1660 case REPORT_ID_TYPE: 1661 ql_report_id_entry(ha, (report_id_1_t *)pkt, 1662 done_q, set_flags, reset_flags); 1663 break; 1664 case ELS_PASSTHRU_TYPE: 1665 ql_els_passthru_entry(ha, 1666 (els_passthru_entry_rsp_t *)pkt, 1667 done_q, set_flags, reset_flags); 1668 break; 1669 case IP_BUF_POOL_TYPE: 1670 case MARKER_TYPE: 1671 case VP_MODIFY_TYPE: 1672 case VP_CONTROL_TYPE: 1673 break; 1674 default: 1675 EL(ha, "Unknown IOCB entry type=%xh\n", 1676 pkt->entry_type); 1677 break; 1678 } 1679 } 1680 } 1681 1682 /* Inform RISC of processed responses. */ 1683 WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index); 1684 1685 /* RESET packet received delay for possible async event. */ 1686 if (status & BIT_0) { 1687 drv_usecwait(500000); 1688 } 1689 1690 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1691 } 1692 1693 /* 1694 * ql_error_entry 1695 * Processes error entry. 1696 * 1697 * Input: 1698 * ha = adapter state pointer. 1699 * pkt = entry pointer. 
1700 * done_q = head pointer to done queue. 1701 * set_flags = task daemon flags to set. 1702 * reset_flags = task daemon flags to reset. 1703 * 1704 * Context: 1705 * Interrupt or Kernel context, no mailbox commands allowed. 1706 */ 1707 /* ARGSUSED */ 1708 static void 1709 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q, 1710 uint32_t *set_flags, uint32_t *reset_flags) 1711 { 1712 ql_srb_t *sp; 1713 uint32_t index, resp_identifier; 1714 1715 if (pkt->entry_type == INVALID_ENTRY_TYPE) { 1716 EL(ha, "Aborted command\n"); 1717 return; 1718 } 1719 1720 QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance); 1721 QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE); 1722 1723 if (pkt->entry_status & BIT_6) { 1724 EL(ha, "Request Queue DMA error\n"); 1725 } else if (pkt->entry_status & BIT_5) { 1726 EL(ha, "Invalid Entry Order\n"); 1727 } else if (pkt->entry_status & BIT_4) { 1728 EL(ha, "Invalid Entry Count\n"); 1729 } else if (pkt->entry_status & BIT_3) { 1730 EL(ha, "Invalid Entry Parameter\n"); 1731 } else if (pkt->entry_status & BIT_2) { 1732 EL(ha, "Invalid Entry Type\n"); 1733 } else if (pkt->entry_status & BIT_1) { 1734 EL(ha, "Busy\n"); 1735 } else { 1736 EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status); 1737 } 1738 1739 /* Validate the response entry handle. */ 1740 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle); 1741 index = resp_identifier & OSC_INDEX_MASK; 1742 if (index < MAX_OUTSTANDING_COMMANDS) { 1743 /* the index seems reasonable */ 1744 sp = ha->outstanding_cmds[index]; 1745 if (sp != NULL) { 1746 if (sp->handle == resp_identifier) { 1747 /* Neo, you're the one... 
*/ 1748 ha->outstanding_cmds[index] = NULL; 1749 sp->handle = 0; 1750 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 1751 } else { 1752 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n", 1753 resp_identifier, sp->handle); 1754 sp = NULL; 1755 ql_signal_abort(ha, set_flags); 1756 } 1757 } else { 1758 sp = ql_verify_preprocessed_cmd(ha, 1759 (uint32_t *)&pkt->handle, set_flags, reset_flags); 1760 } 1761 } else { 1762 EL(ha, "osc index out of range, index=%xh, handle=%xh\n", 1763 index, resp_identifier); 1764 ql_signal_abort(ha, set_flags); 1765 } 1766 1767 if (sp != NULL) { 1768 /* Bad payload or header */ 1769 if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) { 1770 /* Bad payload or header, set error status. */ 1771 sp->pkt->pkt_reason = CS_BAD_PAYLOAD; 1772 } else if (pkt->entry_status & BIT_1) /* FULL flag */ { 1773 sp->pkt->pkt_reason = CS_QUEUE_FULL; 1774 } else { 1775 /* Set error status. */ 1776 sp->pkt->pkt_reason = CS_UNKNOWN; 1777 } 1778 1779 /* Set completed status. */ 1780 sp->flags |= SRB_ISP_COMPLETED; 1781 1782 /* Place command on done queue. */ 1783 ql_add_link_b(done_q, &sp->cmd); 1784 1785 } 1786 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1787 } 1788 1789 /* 1790 * ql_status_entry 1791 * Processes received ISP2200-2300 status entry. 1792 * 1793 * Input: 1794 * ha: adapter state pointer. 1795 * pkt: entry pointer. 1796 * done_q: done queue pointer. 1797 * set_flags: task daemon flags to set. 1798 * reset_flags: task daemon flags to reset. 1799 * 1800 * Returns: 1801 * BIT_0 = CS_RESET status received. 1802 * 1803 * Context: 1804 * Interrupt or Kernel context, no mailbox commands allowed. 
1805 */ 1806 /* ARGSUSED */ 1807 static int 1808 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt, 1809 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 1810 { 1811 ql_srb_t *sp; 1812 uint32_t index, resp_identifier; 1813 uint16_t comp_status; 1814 int rval = 0; 1815 1816 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1817 1818 /* Validate the response entry handle. */ 1819 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle); 1820 index = resp_identifier & OSC_INDEX_MASK; 1821 if (index < MAX_OUTSTANDING_COMMANDS) { 1822 /* the index seems reasonable */ 1823 sp = ha->outstanding_cmds[index]; 1824 if (sp != NULL) { 1825 if (sp->handle == resp_identifier) { 1826 /* Neo, you're the one... */ 1827 ha->outstanding_cmds[index] = NULL; 1828 sp->handle = 0; 1829 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 1830 } else { 1831 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n", 1832 resp_identifier, sp->handle); 1833 sp = NULL; 1834 ql_signal_abort(ha, set_flags); 1835 } 1836 } else { 1837 sp = ql_verify_preprocessed_cmd(ha, 1838 (uint32_t *)&pkt->handle, set_flags, reset_flags); 1839 } 1840 } else { 1841 EL(ha, "osc index out of range, index=%xh, handle=%xh\n", 1842 index, resp_identifier); 1843 ql_signal_abort(ha, set_flags); 1844 } 1845 1846 if (sp != NULL) { 1847 comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 1848 &pkt->comp_status); 1849 1850 /* 1851 * We dont care about SCSI QFULLs. 1852 */ 1853 if (comp_status == CS_QUEUE_FULL) { 1854 EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n", 1855 sp->lun_queue->target_queue->d_id.b24, 1856 sp->lun_queue->lun_no); 1857 comp_status = CS_COMPLETE; 1858 } 1859 1860 /* 1861 * 2300 firmware marks completion status as data underrun 1862 * for scsi qfulls. Make it transport complete. 
1863 */ 1864 if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) && 1865 (comp_status == CS_DATA_UNDERRUN) && 1866 (pkt->scsi_status_l != 0)) { 1867 comp_status = CS_COMPLETE; 1868 } 1869 1870 /* 1871 * Workaround T3 issue where we do not get any data xferred 1872 * but get back a good status. 1873 */ 1874 if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 && 1875 comp_status == CS_COMPLETE && 1876 pkt->scsi_status_l == 0 && 1877 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 && 1878 pkt->residual_length == 0 && 1879 sp->fcp && 1880 sp->fcp->fcp_data_len != 0 && 1881 (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) == 1882 SF_DATA_OUT) { 1883 comp_status = CS_ABORTED; 1884 } 1885 1886 if (sp->flags & SRB_MS_PKT) { 1887 /* 1888 * Ideally it should never be true. But there 1889 * is a bug in FW which upon receiving invalid 1890 * parameters in MS IOCB returns it as 1891 * status entry and not as ms entry type. 1892 */ 1893 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q, 1894 set_flags, reset_flags); 1895 QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n", 1896 ha->instance); 1897 return (0); 1898 } 1899 1900 /* 1901 * Fast path to good SCSI I/O completion 1902 */ 1903 if ((comp_status == CS_COMPLETE) & 1904 (!pkt->scsi_status_l) & 1905 (!(pkt->scsi_status_h & FCP_RSP_MASK))) { 1906 /* Set completed status. */ 1907 sp->flags |= SRB_ISP_COMPLETED; 1908 sp->pkt->pkt_reason = comp_status; 1909 ql_fast_fcp_post(sp); 1910 QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n", 1911 ha->instance); 1912 return (0); 1913 } 1914 rval = ql_status_error(ha, sp, pkt, done_q, set_flags, 1915 reset_flags); 1916 } 1917 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1918 1919 return (rval); 1920 } 1921 1922 /* 1923 * ql_24xx_status_entry 1924 * Processes received ISP24xx status entry. 1925 * 1926 * Input: 1927 * ha: adapter state pointer. 1928 * pkt: entry pointer. 1929 * done_q: done queue pointer. 1930 * set_flags: task daemon flags to set. 1931 * reset_flags: task daemon flags to reset. 
1932 * 1933 * Returns: 1934 * BIT_0 = CS_RESET status received. 1935 * 1936 * Context: 1937 * Interrupt or Kernel context, no mailbox commands allowed. 1938 */ 1939 /* ARGSUSED */ 1940 static int 1941 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt, 1942 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 1943 { 1944 ql_srb_t *sp = NULL; 1945 uint16_t comp_status; 1946 uint32_t index, resp_identifier; 1947 int rval = 0; 1948 1949 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1950 1951 /* Validate the response entry handle. */ 1952 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle); 1953 index = resp_identifier & OSC_INDEX_MASK; 1954 if (index < MAX_OUTSTANDING_COMMANDS) { 1955 /* the index seems reasonable */ 1956 sp = ha->outstanding_cmds[index]; 1957 if (sp != NULL) { 1958 if (sp->handle == resp_identifier) { 1959 /* Neo, you're the one... */ 1960 ha->outstanding_cmds[index] = NULL; 1961 sp->handle = 0; 1962 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 1963 } else { 1964 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n", 1965 resp_identifier, sp->handle); 1966 sp = NULL; 1967 ql_signal_abort(ha, set_flags); 1968 } 1969 } else { 1970 sp = ql_verify_preprocessed_cmd(ha, 1971 (uint32_t *)&pkt->handle, set_flags, reset_flags); 1972 } 1973 } else { 1974 EL(ha, "osc index out of range, index=%xh, handle=%xh\n", 1975 index, resp_identifier); 1976 ql_signal_abort(ha, set_flags); 1977 } 1978 1979 if (sp != NULL) { 1980 comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 1981 &pkt->comp_status); 1982 1983 /* We dont care about SCSI QFULLs. */ 1984 if (comp_status == CS_QUEUE_FULL) { 1985 EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n", 1986 sp->lun_queue->target_queue->d_id.b24, 1987 sp->lun_queue->lun_no); 1988 comp_status = CS_COMPLETE; 1989 } 1990 1991 /* 1992 * 2300 firmware marks completion status as data underrun 1993 * for scsi qfulls. Make it transport complete. 
1994 */ 1995 if ((comp_status == CS_DATA_UNDERRUN) && 1996 (pkt->scsi_status_l != 0)) { 1997 comp_status = CS_COMPLETE; 1998 } 1999 2000 /* 2001 * Workaround T3 issue where we do not get any data xferred 2002 * but get back a good status. 2003 */ 2004 if (comp_status == CS_COMPLETE && 2005 pkt->scsi_status_l == 0 && 2006 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 && 2007 pkt->residual_length != 0 && 2008 sp->fcp && 2009 sp->fcp->fcp_data_len != 0 && 2010 sp->fcp->fcp_cntl.cntl_write_data) { 2011 comp_status = CS_ABORTED; 2012 } 2013 2014 /* 2015 * Fast path to good SCSI I/O completion 2016 */ 2017 if ((comp_status == CS_COMPLETE) & 2018 (!pkt->scsi_status_l) & 2019 (!(pkt->scsi_status_h & FCP_RSP_MASK))) { 2020 /* Set completed status. */ 2021 sp->flags |= SRB_ISP_COMPLETED; 2022 sp->pkt->pkt_reason = comp_status; 2023 ql_fast_fcp_post(sp); 2024 QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n", 2025 ha->instance); 2026 return (0); 2027 } 2028 rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q, 2029 set_flags, reset_flags); 2030 } 2031 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2032 2033 return (rval); 2034 } 2035 2036 /* 2037 * ql_verify_preprocessed_cmd 2038 * Handles preprocessed cmds.. 2039 * 2040 * Input: 2041 * ha: adapter state pointer. 2042 * pkt_handle: handle pointer. 2043 * set_flags: task daemon flags to set. 2044 * reset_flags: task daemon flags to reset. 2045 * 2046 * Returns: 2047 * srb pointer or NULL 2048 * 2049 * Context: 2050 * Interrupt or Kernel context, no mailbox commands allowed. 2051 */ 2052 /* ARGSUSED */ 2053 ql_srb_t * 2054 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle, 2055 uint32_t *set_flags, uint32_t *reset_flags) 2056 { 2057 ql_srb_t *sp = NULL; 2058 uint32_t index, resp_identifier; 2059 uint32_t get_handle = 10; 2060 2061 while (get_handle) { 2062 /* Get handle. 
*/ 2063 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle); 2064 index = resp_identifier & OSC_INDEX_MASK; 2065 /* Validate handle. */ 2066 if (index < MAX_OUTSTANDING_COMMANDS) { 2067 sp = ha->outstanding_cmds[index]; 2068 } 2069 2070 if (sp != NULL) { 2071 EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp, 2072 resp_identifier, get_handle, index); 2073 break; 2074 } else { 2075 get_handle -= 1; 2076 drv_usecwait(10000); 2077 if (get_handle == 1) { 2078 /* Last chance, Sync whole DMA buffer. */ 2079 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 2080 RESPONSE_Q_BUFFER_OFFSET, 2081 RESPONSE_QUEUE_SIZE, 2082 DDI_DMA_SYNC_FORKERNEL); 2083 EL(ha, "last chance DMA sync, index=%xh\n", 2084 index); 2085 } 2086 } 2087 } 2088 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2089 2090 return (sp); 2091 } 2092 2093 2094 /* 2095 * ql_status_error 2096 * Processes received ISP status entry error. 2097 * 2098 * Input: 2099 * ha: adapter state pointer. 2100 * sp: SRB pointer. 2101 * pkt: entry pointer. 2102 * done_q: done queue pointer. 2103 * set_flags: task daemon flags to set. 2104 * reset_flags: task daemon flags to reset. 2105 * 2106 * Returns: 2107 * BIT_0 = CS_RESET status received. 2108 * 2109 * Context: 2110 * Interrupt or Kernel context, no mailbox commands allowed. 
2111 */ 2112 /* ARGSUSED */ 2113 static int 2114 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23, 2115 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 2116 { 2117 uint32_t sense_sz = 0; 2118 uint32_t cnt; 2119 ql_tgt_t *tq; 2120 fcp_rsp_t *fcpr; 2121 struct fcp_rsp_info *rsp; 2122 int rval = 0; 2123 2124 struct { 2125 uint8_t *rsp_info; 2126 uint8_t *req_sense_data; 2127 uint32_t residual_length; 2128 uint32_t fcp_residual_length; 2129 uint32_t rsp_info_length; 2130 uint32_t req_sense_length; 2131 uint16_t comp_status; 2132 uint8_t state_flags_l; 2133 uint8_t state_flags_h; 2134 uint8_t scsi_status_l; 2135 uint8_t scsi_status_h; 2136 } sts; 2137 2138 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2139 2140 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2141 sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23; 2142 2143 /* Setup status. */ 2144 sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 2145 &pkt24->comp_status); 2146 sts.scsi_status_l = pkt24->scsi_status_l; 2147 sts.scsi_status_h = pkt24->scsi_status_h; 2148 2149 /* Setup firmware residuals. */ 2150 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ? 2151 ddi_get32(ha->hba_buf.acc_handle, 2152 (uint32_t *)&pkt24->residual_length) : 0; 2153 2154 /* Setup FCP residuals. */ 2155 sts.fcp_residual_length = sts.scsi_status_h & 2156 (FCP_RESID_UNDER | FCP_RESID_OVER) ? 2157 ddi_get32(ha->hba_buf.acc_handle, 2158 (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0; 2159 2160 if ((sts.comp_status == CS_DATA_UNDERRUN) && 2161 (sts.scsi_status_h & FCP_RESID_UNDER) && 2162 (sts.residual_length != pkt24->fcp_rsp_residual_count)) { 2163 2164 EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n", 2165 sts.residual_length, 2166 pkt24->fcp_rsp_residual_count); 2167 sts.scsi_status_h = (uint8_t) 2168 (sts.scsi_status_h & ~FCP_RESID_UNDER); 2169 } 2170 2171 /* Setup state flags. 
*/ 2172 sts.state_flags_l = pkt24->state_flags_l; 2173 sts.state_flags_h = pkt24->state_flags_h; 2174 2175 if (sp->fcp->fcp_data_len && 2176 (sts.comp_status != CS_DATA_UNDERRUN || 2177 sts.residual_length != sp->fcp->fcp_data_len)) { 2178 sts.state_flags_h = (uint8_t) 2179 (sts.state_flags_h | SF_GOT_BUS | 2180 SF_GOT_TARGET | SF_SENT_CMD | 2181 SF_XFERRED_DATA | SF_GOT_STATUS); 2182 } else { 2183 sts.state_flags_h = (uint8_t) 2184 (sts.state_flags_h | SF_GOT_BUS | 2185 SF_GOT_TARGET | SF_SENT_CMD | 2186 SF_GOT_STATUS); 2187 } 2188 if (sp->fcp->fcp_cntl.cntl_write_data) { 2189 sts.state_flags_l = (uint8_t) 2190 (sts.state_flags_l | SF_DATA_OUT); 2191 } else if (sp->fcp->fcp_cntl.cntl_read_data) { 2192 sts.state_flags_l = (uint8_t) 2193 (sts.state_flags_l | SF_DATA_IN); 2194 } 2195 if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) { 2196 sts.state_flags_l = (uint8_t) 2197 (sts.state_flags_l | SF_HEAD_OF_Q); 2198 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) { 2199 sts.state_flags_l = (uint8_t) 2200 (sts.state_flags_l | SF_ORDERED_Q); 2201 } else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) { 2202 sts.state_flags_l = (uint8_t) 2203 (sts.state_flags_l | SF_SIMPLE_Q); 2204 } 2205 2206 /* Setup FCP response info. */ 2207 sts.rsp_info = &pkt24->rsp_sense_data[0]; 2208 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) { 2209 sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle, 2210 (uint32_t *)&pkt24->fcp_rsp_data_length); 2211 if (sts.rsp_info_length > 2212 sizeof (struct fcp_rsp_info)) { 2213 sts.rsp_info_length = 2214 sizeof (struct fcp_rsp_info); 2215 } 2216 for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) { 2217 ql_chg_endian(sts.rsp_info + cnt, 4); 2218 } 2219 } else { 2220 sts.rsp_info_length = 0; 2221 } 2222 2223 /* Setup sense data. 
*/ 2224 sts.req_sense_data = 2225 &pkt24->rsp_sense_data[sts.rsp_info_length]; 2226 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) { 2227 sts.req_sense_length = 2228 ddi_get32(ha->hba_buf.acc_handle, 2229 (uint32_t *)&pkt24->fcp_sense_length); 2230 sts.state_flags_h = (uint8_t) 2231 (sts.state_flags_h | SF_ARQ_DONE); 2232 sense_sz = (uint32_t) 2233 (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) - 2234 (uintptr_t)sts.req_sense_data); 2235 for (cnt = 0; cnt < sense_sz; cnt += 4) { 2236 ql_chg_endian(sts.req_sense_data + cnt, 4); 2237 } 2238 } else { 2239 sts.req_sense_length = 0; 2240 } 2241 } else { 2242 /* Setup status. */ 2243 sts.comp_status = (uint16_t)ddi_get16( 2244 ha->hba_buf.acc_handle, &pkt23->comp_status); 2245 sts.scsi_status_l = pkt23->scsi_status_l; 2246 sts.scsi_status_h = pkt23->scsi_status_h; 2247 2248 /* Setup firmware residuals. */ 2249 sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ? 2250 ddi_get32(ha->hba_buf.acc_handle, 2251 (uint32_t *)&pkt23->residual_length) : 0; 2252 2253 /* Setup FCP residuals. */ 2254 sts.fcp_residual_length = sts.scsi_status_h & 2255 (FCP_RESID_UNDER | FCP_RESID_OVER) ? 2256 sts.residual_length : 0; 2257 2258 /* Setup state flags. */ 2259 sts.state_flags_l = pkt23->state_flags_l; 2260 sts.state_flags_h = pkt23->state_flags_h; 2261 2262 /* Setup FCP response info. */ 2263 sts.rsp_info = &pkt23->rsp_info[0]; 2264 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) { 2265 sts.rsp_info_length = ddi_get16( 2266 ha->hba_buf.acc_handle, 2267 (uint16_t *)&pkt23->rsp_info_length); 2268 if (sts.rsp_info_length > 2269 sizeof (struct fcp_rsp_info)) { 2270 sts.rsp_info_length = 2271 sizeof (struct fcp_rsp_info); 2272 } 2273 } else { 2274 sts.rsp_info_length = 0; 2275 } 2276 2277 /* Setup sense data. */ 2278 sts.req_sense_data = &pkt23->req_sense_data[0]; 2279 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ? 
2280 ddi_get16(ha->hba_buf.acc_handle, 2281 (uint16_t *)&pkt23->req_sense_length) : 0; 2282 } 2283 2284 bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen); 2285 2286 fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp; 2287 rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp + 2288 sizeof (fcp_rsp_t)); 2289 2290 tq = sp->lun_queue->target_queue; 2291 2292 fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l; 2293 if (sts.scsi_status_h & FCP_RSP_LEN_VALID) { 2294 fcpr->fcp_u.fcp_status.rsp_len_set = 1; 2295 } 2296 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) { 2297 fcpr->fcp_u.fcp_status.sense_len_set = 1; 2298 } 2299 if (sts.scsi_status_h & FCP_RESID_OVER) { 2300 fcpr->fcp_u.fcp_status.resid_over = 1; 2301 } 2302 if (sts.scsi_status_h & FCP_RESID_UNDER) { 2303 fcpr->fcp_u.fcp_status.resid_under = 1; 2304 } 2305 fcpr->fcp_u.fcp_status.reserved_1 = 0; 2306 2307 /* Set ISP completion status */ 2308 sp->pkt->pkt_reason = sts.comp_status; 2309 2310 /* Update statistics. */ 2311 if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) && 2312 (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) { 2313 2314 sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t); 2315 if (sense_sz > sts.rsp_info_length) { 2316 sense_sz = sts.rsp_info_length; 2317 } 2318 2319 /* copy response information data. 
*/ 2320 if (sense_sz) { 2321 ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp, 2322 sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR); 2323 } 2324 fcpr->fcp_response_len = sense_sz; 2325 2326 rsp = (struct fcp_rsp_info *)((caddr_t)rsp + 2327 fcpr->fcp_response_len); 2328 2329 switch (*(sts.rsp_info + 3)) { 2330 case FCP_NO_FAILURE: 2331 break; 2332 case FCP_DL_LEN_MISMATCH: 2333 ha->adapter_stats->d_stats[lobyte( 2334 tq->loop_id)].dl_len_mismatches++; 2335 break; 2336 case FCP_CMND_INVALID: 2337 break; 2338 case FCP_DATA_RO_MISMATCH: 2339 ha->adapter_stats->d_stats[lobyte( 2340 tq->loop_id)].data_ro_mismatches++; 2341 break; 2342 case FCP_TASK_MGMT_NOT_SUPPTD: 2343 break; 2344 case FCP_TASK_MGMT_FAILED: 2345 ha->adapter_stats->d_stats[lobyte( 2346 tq->loop_id)].task_mgmt_failures++; 2347 break; 2348 default: 2349 break; 2350 } 2351 } else { 2352 /* 2353 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n", 2354 * sts.scsi_status_h, sp->pkt->pkt_rsplen); 2355 */ 2356 fcpr->fcp_response_len = 0; 2357 } 2358 2359 /* Set reset status received. */ 2360 if (sts.comp_status == CS_RESET && LOOP_READY(ha)) { 2361 rval |= BIT_0; 2362 } 2363 2364 if (!(tq->flags & TQF_TAPE_DEVICE) && 2365 (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) || 2366 ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) && 2367 ha->task_daemon_flags & LOOP_DOWN) { 2368 EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n", 2369 tq->d_id.b24, sp->lun_queue->lun_no); 2370 2371 /* Set retry status. 
*/ 2372 sp->flags |= SRB_RETRY; 2373 } else if (!(tq->flags & TQF_TAPE_DEVICE) && 2374 tq->port_down_retry_count != 0 && 2375 (sts.comp_status == CS_INCOMPLETE || 2376 sts.comp_status == CS_PORT_UNAVAILABLE || 2377 sts.comp_status == CS_PORT_LOGGED_OUT || 2378 sts.comp_status == CS_PORT_CONFIG_CHG || 2379 sts.comp_status == CS_PORT_BUSY)) { 2380 EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d" 2381 "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no, 2382 tq->port_down_retry_count); 2383 2384 /* Set retry status. */ 2385 sp->flags |= SRB_RETRY; 2386 2387 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) { 2388 /* Acquire device queue lock. */ 2389 DEVICE_QUEUE_LOCK(tq); 2390 2391 tq->flags |= TQF_QUEUE_SUSPENDED; 2392 2393 /* Decrement port down count. */ 2394 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) { 2395 tq->port_down_retry_count--; 2396 } 2397 2398 DEVICE_QUEUE_UNLOCK(tq); 2399 2400 if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE) 2401 == 0 && 2402 (sts.comp_status == CS_PORT_LOGGED_OUT || 2403 sts.comp_status == CS_PORT_UNAVAILABLE)) { 2404 sp->ha->adapter_stats->d_stats[lobyte( 2405 tq->loop_id)].logouts_recvd++; 2406 ql_send_logo(sp->ha, tq, done_q); 2407 } 2408 2409 ADAPTER_STATE_LOCK(ha); 2410 if (ha->port_retry_timer == 0) { 2411 if ((ha->port_retry_timer = 2412 ha->port_down_retry_delay) == 0) { 2413 *set_flags |= 2414 PORT_RETRY_NEEDED; 2415 } 2416 } 2417 ADAPTER_STATE_UNLOCK(ha); 2418 } 2419 } else if (!(tq->flags & TQF_TAPE_DEVICE) && 2420 (sts.comp_status == CS_RESET || 2421 (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) || 2422 (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) { 2423 if (sts.comp_status == CS_RESET) { 2424 EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n", 2425 tq->d_id.b24, sp->lun_queue->lun_no); 2426 } else if (sts.comp_status == CS_QUEUE_FULL) { 2427 EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, " 2428 "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no, 2429 
tq->qfull_retry_count); 2430 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) { 2431 tq->flags |= TQF_QUEUE_SUSPENDED; 2432 2433 tq->qfull_retry_count--; 2434 2435 ADAPTER_STATE_LOCK(ha); 2436 if (ha->port_retry_timer == 0) { 2437 if ((ha->port_retry_timer = 2438 ha->qfull_retry_delay) == 2439 0) { 2440 *set_flags |= 2441 PORT_RETRY_NEEDED; 2442 } 2443 } 2444 ADAPTER_STATE_UNLOCK(ha); 2445 } 2446 } else { 2447 EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n", 2448 tq->d_id.b24, sp->lun_queue->lun_no); 2449 } 2450 2451 /* Set retry status. */ 2452 sp->flags |= SRB_RETRY; 2453 } else { 2454 fcpr->fcp_resid = 2455 sts.fcp_residual_length > sp->fcp->fcp_data_len ? 2456 sp->fcp->fcp_data_len : sts.fcp_residual_length; 2457 2458 if ((sts.comp_status == CS_DATA_UNDERRUN) && 2459 (sts.scsi_status_h & FCP_RESID_UNDER) == 0) { 2460 2461 if (sts.scsi_status_l == STATUS_CHECK) { 2462 sp->pkt->pkt_reason = CS_COMPLETE; 2463 } else { 2464 EL(ha, "transport error - " 2465 "underrun & invalid resid\n"); 2466 EL(ha, "ssh=%xh, ssl=%xh\n", 2467 sts.scsi_status_h, sts.scsi_status_l); 2468 sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR; 2469 } 2470 } 2471 2472 /* Ignore firmware underrun error. */ 2473 if (sts.comp_status == CS_DATA_UNDERRUN && 2474 (sts.scsi_status_h & FCP_RESID_UNDER || 2475 (sts.scsi_status_l != STATUS_CHECK && 2476 sts.scsi_status_l != STATUS_GOOD))) { 2477 sp->pkt->pkt_reason = CS_COMPLETE; 2478 } 2479 2480 if (sp->pkt->pkt_reason != CS_COMPLETE) { 2481 ha->xioctl->DeviceErrorCount++; 2482 EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh" 2483 "\n", sts.comp_status, tq->d_id.b24, 2484 sp->lun_queue->lun_no); 2485 } 2486 2487 /* Set target request sense data. 
*/ 2488 if (sts.scsi_status_l == STATUS_CHECK) { 2489 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) { 2490 2491 if (sp->pkt->pkt_reason == CS_COMPLETE && 2492 sts.req_sense_data[2] != KEY_NO_SENSE && 2493 sts.req_sense_data[2] != 2494 KEY_UNIT_ATTENTION) { 2495 ha->xioctl->DeviceErrorCount++; 2496 } 2497 2498 sense_sz = sts.req_sense_length; 2499 2500 /* Insure data does not exceed buf. */ 2501 if (sp->pkt->pkt_rsplen <= 2502 (uint32_t)sizeof (fcp_rsp_t) + 2503 fcpr->fcp_response_len) { 2504 sp->request_sense_length = 0; 2505 } else { 2506 sp->request_sense_length = (uint32_t) 2507 (sp->pkt->pkt_rsplen - 2508 sizeof (fcp_rsp_t) - 2509 fcpr->fcp_response_len); 2510 } 2511 2512 if (sense_sz < 2513 sp->request_sense_length) { 2514 sp->request_sense_length = 2515 sense_sz; 2516 } 2517 2518 sp->request_sense_ptr = (caddr_t)rsp; 2519 2520 sense_sz = (uint32_t) 2521 (((uintptr_t)pkt23 + 2522 sizeof (sts_entry_t)) - 2523 (uintptr_t)sts.req_sense_data); 2524 if (sp->request_sense_length < 2525 sense_sz) { 2526 sense_sz = 2527 sp->request_sense_length; 2528 } 2529 2530 fcpr->fcp_sense_len = sense_sz; 2531 2532 /* Move sense data. 
*/ 2533 ddi_rep_get8(ha->hba_buf.acc_handle, 2534 (uint8_t *)sp->request_sense_ptr, 2535 sts.req_sense_data, 2536 (size_t)sense_sz, 2537 DDI_DEV_AUTOINCR); 2538 2539 sp->request_sense_ptr += sense_sz; 2540 sp->request_sense_length -= sense_sz; 2541 if (sp->request_sense_length != 0 && 2542 !(CFG_IST(ha, CFG_CTRL_8021))) { 2543 ha->status_srb = sp; 2544 } 2545 } 2546 2547 if (sense_sz != 0) { 2548 EL(sp->ha, "check condition sense data, " 2549 "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh" 2550 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh" 2551 "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24, 2552 sp->lun_queue->lun_no, 2553 sts.req_sense_data[0], 2554 sts.req_sense_data[1], 2555 sts.req_sense_data[2], 2556 sts.req_sense_data[3], 2557 sts.req_sense_data[4], 2558 sts.req_sense_data[5], 2559 sts.req_sense_data[6], 2560 sts.req_sense_data[7], 2561 sts.req_sense_data[8], 2562 sts.req_sense_data[9], 2563 sts.req_sense_data[10], 2564 sts.req_sense_data[11], 2565 sts.req_sense_data[12], 2566 sts.req_sense_data[13], 2567 sts.req_sense_data[14], 2568 sts.req_sense_data[15], 2569 sts.req_sense_data[16], 2570 sts.req_sense_data[17]); 2571 } else { 2572 EL(sp->ha, "check condition, d_id=%xh, lun=%xh" 2573 "\n", tq->d_id.b24, sp->lun_queue->lun_no); 2574 } 2575 } 2576 } 2577 2578 /* Set completed status. */ 2579 sp->flags |= SRB_ISP_COMPLETED; 2580 2581 /* Place command on done queue. */ 2582 if (ha->status_srb == NULL) { 2583 ql_add_link_b(done_q, &sp->cmd); 2584 } 2585 2586 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2587 2588 return (rval); 2589 } 2590 2591 /* 2592 * ql_status_cont_entry 2593 * Processes status continuation entry. 2594 * 2595 * Input: 2596 * ha: adapter state pointer. 2597 * pkt: entry pointer. 2598 * done_q: done queue pointer. 2599 * set_flags: task daemon flags to set. 2600 * reset_flags: task daemon flags to reset. 2601 * 2602 * Context: 2603 * Interrupt or Kernel context, no mailbox commands allowed. 
 */
/* ARGSUSED */
static void
ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	uint32_t	sense_sz, index;
	/* Command whose sense data is being continued by this entry. */
	ql_srb_t	*sp = ha->status_srb;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (sp != NULL && sp->request_sense_length) {
		/*
		 * Copy at most one continuation entry's worth of sense
		 * data; the remainder (if any) arrives in further
		 * continuation entries.
		 */
		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
			sense_sz = sizeof (pkt->req_sense_data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/*
		 * 24xx-and-newer chips deliver the sense bytes with a
		 * different endianness; swap in place, 4 bytes at a time.
		 */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			for (index = 0; index < sense_sz; index += 4) {
				ql_chg_endian((uint8_t *)
				    &pkt->req_sense_data[0] + index, 4);
			}
		}

		/* Move sense data from the IOCB into the caller's buffer. */
		ddi_rep_get8(ha->hba_buf.acc_handle,
		    (uint8_t *)sp->request_sense_ptr,
		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
		    DDI_DEV_AUTOINCR);

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/*
		 * Place command on done queue only once all sense data
		 * has been gathered; until then ha->status_srb keeps the
		 * command parked for further continuation entries.
		 */
		if (sp->request_sense_length == 0) {
			ql_add_link_b(done_q, &sp->cmd);
			ha->status_srb = NULL;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_entry
 *	Processes received ISP IP entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
2661 */ 2662 /* ARGSUSED */ 2663 static void 2664 ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q, 2665 uint32_t *set_flags, uint32_t *reset_flags) 2666 { 2667 ql_srb_t *sp; 2668 uint32_t index, resp_identifier; 2669 ql_tgt_t *tq; 2670 2671 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2672 2673 /* Validate the response entry handle. */ 2674 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle); 2675 index = resp_identifier & OSC_INDEX_MASK; 2676 if (index < MAX_OUTSTANDING_COMMANDS) { 2677 /* the index seems reasonable */ 2678 sp = ha->outstanding_cmds[index]; 2679 if (sp != NULL) { 2680 if (sp->handle == resp_identifier) { 2681 /* Neo, you're the one... */ 2682 ha->outstanding_cmds[index] = NULL; 2683 sp->handle = 0; 2684 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 2685 } else { 2686 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n", 2687 resp_identifier, sp->handle); 2688 sp = NULL; 2689 ql_signal_abort(ha, set_flags); 2690 } 2691 } else { 2692 sp = ql_verify_preprocessed_cmd(ha, 2693 (uint32_t *)&pkt23->handle, set_flags, reset_flags); 2694 } 2695 } else { 2696 EL(ha, "osc index out of range, index=%xh, handle=%xh\n", 2697 index, resp_identifier); 2698 ql_signal_abort(ha, set_flags); 2699 } 2700 2701 if (sp != NULL) { 2702 tq = sp->lun_queue->target_queue; 2703 2704 /* Set ISP completion status */ 2705 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2706 ip_cmd_entry_t *pkt24 = (ip_cmd_entry_t *)pkt23; 2707 2708 sp->pkt->pkt_reason = ddi_get16( 2709 ha->hba_buf.acc_handle, &pkt24->hdl_status); 2710 } else { 2711 sp->pkt->pkt_reason = ddi_get16( 2712 ha->hba_buf.acc_handle, &pkt23->comp_status); 2713 } 2714 2715 if (ha->task_daemon_flags & LOOP_DOWN) { 2716 EL(ha, "Loop Not Ready Retry, d_id=%xh\n", 2717 tq->d_id.b24); 2718 2719 /* Set retry status. 
*/ 2720 sp->flags |= SRB_RETRY; 2721 2722 } else if (tq->port_down_retry_count && 2723 (sp->pkt->pkt_reason == CS_INCOMPLETE || 2724 sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE || 2725 sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT || 2726 sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG || 2727 sp->pkt->pkt_reason == CS_PORT_BUSY)) { 2728 EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n", 2729 sp->pkt->pkt_reason, tq->d_id.b24, 2730 tq->port_down_retry_count); 2731 2732 /* Set retry status. */ 2733 sp->flags |= SRB_RETRY; 2734 2735 if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT || 2736 sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) { 2737 ha->adapter_stats->d_stats[lobyte( 2738 tq->loop_id)].logouts_recvd++; 2739 ql_send_logo(ha, tq, done_q); 2740 } 2741 2742 /* Acquire device queue lock. */ 2743 DEVICE_QUEUE_LOCK(tq); 2744 2745 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) { 2746 tq->flags |= TQF_QUEUE_SUSPENDED; 2747 2748 tq->port_down_retry_count--; 2749 2750 ADAPTER_STATE_LOCK(ha); 2751 if (ha->port_retry_timer == 0) { 2752 if ((ha->port_retry_timer = 2753 ha->port_down_retry_delay) == 0) { 2754 *set_flags |= 2755 PORT_RETRY_NEEDED; 2756 } 2757 } 2758 ADAPTER_STATE_UNLOCK(ha); 2759 } 2760 2761 /* Release device queue specific lock. */ 2762 DEVICE_QUEUE_UNLOCK(tq); 2763 2764 } else if (sp->pkt->pkt_reason == CS_RESET) { 2765 EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24); 2766 2767 /* Set retry status. */ 2768 sp->flags |= SRB_RETRY; 2769 } else { 2770 if (sp->pkt->pkt_reason != CS_COMPLETE) { 2771 EL(ha, "Cmplt status err=%xh, d_id=%xh\n", 2772 sp->pkt->pkt_reason, tq->d_id.b24); 2773 } 2774 } 2775 2776 /* Set completed status. */ 2777 sp->flags |= SRB_ISP_COMPLETED; 2778 2779 ql_add_link_b(done_q, &sp->cmd); 2780 2781 } 2782 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2783 } 2784 2785 /* 2786 * ql_ip_rcv_entry 2787 * Processes received ISP IP buffers entry. 2788 * 2789 * Input: 2790 * ha: adapter state pointer. 2791 * pkt: entry pointer. 
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	port_id_t	s_id;
	uint16_t	index;
	uint8_t		cnt;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Locate device queue from the 24-bit source port ID carried
	 * byte-by-byte in the IOCB (al_pa / area / domain).
	 */
	s_id.b.al_pa = pkt->s_id[0];
	s_id.b.area = pkt->s_id[1];
	s_id.b.domain = pkt->s_id[2];
	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
		return;
	}

	/* Start a new unsolicited-buffer sequence for this target. */
	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
	    &pkt->seq_length);
	tq->ub_total_seg_cnt = pkt->segment_count;
	tq->ub_seq_id = ++ha->ub_seq_id;
	tq->ub_seq_cnt = 0;
	tq->ub_frame_ro = 0;
	tq->ub_loop_id = pkt->loop_id;
	ha->rcv_dev_q = tq;

	/*
	 * Hand each firmware buffer handle to the upper layer; stop on
	 * the first failure and request an ISP abort to resynchronize.
	 */
	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
	    tq->ub_total_seg_cnt; cnt++) {

		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->buffer_handle[cnt]);

		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
			break;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_rcv_cont_entry
 *	Processes received ISP IP buffers continuation entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
2858 */ 2859 /* ARGSUSED */ 2860 static void 2861 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt, 2862 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 2863 { 2864 uint16_t index; 2865 uint8_t cnt; 2866 ql_tgt_t *tq; 2867 2868 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2869 2870 if ((tq = ha->rcv_dev_q) == NULL) { 2871 EL(ha, "No IP receive device\n"); 2872 return; 2873 } 2874 2875 for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES && 2876 tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) { 2877 2878 index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 2879 &pkt->buffer_handle[cnt]); 2880 2881 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) { 2882 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n"); 2883 *set_flags |= ISP_ABORT_NEEDED; 2884 break; 2885 } 2886 } 2887 2888 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2889 } 2890 2891 /* 2892 * ip_rcv_24xx_entry_t 2893 * Processes received ISP24xx IP buffers entry. 2894 * 2895 * Input: 2896 * ha: adapter state pointer. 2897 * pkt: entry pointer. 2898 * done_q: done queue pointer. 2899 * set_flags: task daemon flags to set. 2900 * reset_flags: task daemon flags to reset. 2901 * 2902 * Context: 2903 * Interrupt or Kernel context, no mailbox commands allowed. 2904 */ 2905 /* ARGSUSED */ 2906 static void 2907 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt, 2908 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 2909 { 2910 port_id_t s_id; 2911 uint16_t index; 2912 uint8_t cnt; 2913 ql_tgt_t *tq; 2914 2915 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2916 2917 /* Locate device queue. 
*/ 2918 s_id.b.al_pa = pkt->s_id[0]; 2919 s_id.b.area = pkt->s_id[1]; 2920 s_id.b.domain = pkt->s_id[2]; 2921 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) { 2922 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24); 2923 return; 2924 } 2925 2926 if (tq->ub_total_seg_cnt == 0) { 2927 tq->ub_sequence_length = (uint16_t)ddi_get16( 2928 ha->hba_buf.acc_handle, &pkt->seq_length); 2929 tq->ub_total_seg_cnt = pkt->segment_count; 2930 tq->ub_seq_id = ++ha->ub_seq_id; 2931 tq->ub_seq_cnt = 0; 2932 tq->ub_frame_ro = 0; 2933 tq->ub_loop_id = (uint16_t)ddi_get16( 2934 ha->hba_buf.acc_handle, &pkt->n_port_hdl); 2935 } 2936 2937 for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt < 2938 tq->ub_total_seg_cnt; cnt++) { 2939 2940 index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 2941 &pkt->buffer_handle[cnt]); 2942 2943 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) { 2944 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n"); 2945 *set_flags |= ISP_ABORT_NEEDED; 2946 break; 2947 } 2948 } 2949 2950 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2951 } 2952 2953 /* 2954 * ql_ms_entry 2955 * Processes received Name/Management/CT Pass-Through entry. 2956 * 2957 * Input: 2958 * ha: adapter state pointer. 2959 * pkt23: entry pointer. 2960 * done_q: done queue pointer. 2961 * set_flags: task daemon flags to set. 2962 * reset_flags: task daemon flags to reset. 2963 * 2964 * Context: 2965 * Interrupt or Kernel context, no mailbox commands allowed. 2966 */ 2967 /* ARGSUSED */ 2968 static void 2969 ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q, 2970 uint32_t *set_flags, uint32_t *reset_flags) 2971 { 2972 ql_srb_t *sp; 2973 uint32_t index, cnt, resp_identifier; 2974 ql_tgt_t *tq; 2975 ct_passthru_entry_t *pkt24 = (ct_passthru_entry_t *)pkt23; 2976 2977 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2978 2979 /* Validate the response entry handle. 
*/ 2980 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle); 2981 index = resp_identifier & OSC_INDEX_MASK; 2982 if (index < MAX_OUTSTANDING_COMMANDS) { 2983 /* the index seems reasonable */ 2984 sp = ha->outstanding_cmds[index]; 2985 if (sp != NULL) { 2986 if (sp->handle == resp_identifier) { 2987 /* Neo, you're the one... */ 2988 ha->outstanding_cmds[index] = NULL; 2989 sp->handle = 0; 2990 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 2991 } else { 2992 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n", 2993 resp_identifier, sp->handle); 2994 sp = NULL; 2995 ql_signal_abort(ha, set_flags); 2996 } 2997 } else { 2998 sp = ql_verify_preprocessed_cmd(ha, 2999 (uint32_t *)&pkt23->handle, set_flags, reset_flags); 3000 } 3001 } else { 3002 EL(ha, "osc index out of range, index=%xh, handle=%xh\n", 3003 index, resp_identifier); 3004 ql_signal_abort(ha, set_flags); 3005 } 3006 3007 if (sp != NULL) { 3008 if (!(sp->flags & SRB_MS_PKT)) { 3009 EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed", 3010 sp->flags); 3011 *set_flags |= ISP_ABORT_NEEDED; 3012 return; 3013 } 3014 3015 tq = sp->lun_queue->target_queue; 3016 3017 /* Set ISP completion status */ 3018 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3019 sp->pkt->pkt_reason = ddi_get16( 3020 ha->hba_buf.acc_handle, &pkt24->status); 3021 } else { 3022 sp->pkt->pkt_reason = ddi_get16( 3023 ha->hba_buf.acc_handle, &pkt23->comp_status); 3024 } 3025 3026 if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE && 3027 sp->retry_count) { 3028 EL(ha, "Resouce Unavailable Retry = %d\n", 3029 sp->retry_count); 3030 3031 /* Set retry status. */ 3032 sp->retry_count--; 3033 sp->flags |= SRB_RETRY; 3034 3035 /* Acquire device queue lock. 
*/ 3036 DEVICE_QUEUE_LOCK(tq); 3037 3038 if (!(tq->flags & TQF_QUEUE_SUSPENDED)) { 3039 tq->flags |= TQF_QUEUE_SUSPENDED; 3040 3041 ADAPTER_STATE_LOCK(ha); 3042 if (ha->port_retry_timer == 0) { 3043 ha->port_retry_timer = 2; 3044 } 3045 ADAPTER_STATE_UNLOCK(ha); 3046 } 3047 3048 /* Release device queue specific lock. */ 3049 DEVICE_QUEUE_UNLOCK(tq); 3050 3051 } else if (tq->port_down_retry_count && 3052 (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG || 3053 sp->pkt->pkt_reason == CS_PORT_BUSY)) { 3054 EL(ha, "Port Down Retry\n"); 3055 3056 /* Set retry status. */ 3057 sp->flags |= SRB_RETRY; 3058 3059 /* Acquire device queue lock. */ 3060 DEVICE_QUEUE_LOCK(tq); 3061 3062 if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) { 3063 tq->flags |= TQF_QUEUE_SUSPENDED; 3064 3065 tq->port_down_retry_count--; 3066 3067 ADAPTER_STATE_LOCK(ha); 3068 if (ha->port_retry_timer == 0) { 3069 if ((ha->port_retry_timer = 3070 ha->port_down_retry_delay) == 0) { 3071 *set_flags |= 3072 PORT_RETRY_NEEDED; 3073 } 3074 } 3075 ADAPTER_STATE_UNLOCK(ha); 3076 } 3077 /* Release device queue specific lock. */ 3078 DEVICE_QUEUE_UNLOCK(tq); 3079 3080 } else if (sp->pkt->pkt_reason == CS_RESET) { 3081 EL(ha, "Reset Retry\n"); 3082 3083 /* Set retry status. 
*/ 3084 sp->flags |= SRB_RETRY; 3085 3086 } else if (CFG_IST(ha, CFG_CTRL_24258081) && 3087 sp->pkt->pkt_reason == CS_DATA_UNDERRUN) { 3088 cnt = ddi_get32(ha->hba_buf.acc_handle, 3089 &pkt24->resp_byte_count); 3090 if (cnt < sizeof (fc_ct_header_t)) { 3091 EL(ha, "Data underrun\n"); 3092 } else { 3093 sp->pkt->pkt_reason = CS_COMPLETE; 3094 } 3095 3096 } else if (sp->pkt->pkt_reason != CS_COMPLETE) { 3097 EL(ha, "status err=%xh\n", sp->pkt->pkt_reason); 3098 } 3099 3100 if (sp->pkt->pkt_reason == CS_COMPLETE) { 3101 /*EMPTY*/ 3102 QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n", 3103 ha->instance, sp->pkt->pkt_cmd[8], 3104 sp->pkt->pkt_cmd[9]); 3105 QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen); 3106 } 3107 3108 /* For nameserver restore command, management change header. */ 3109 if ((sp->flags & SRB_RETRY) == 0) { 3110 tq->d_id.b24 == 0xfffffc ? 3111 ql_cthdr_endian(sp->pkt->pkt_cmd_acc, 3112 sp->pkt->pkt_cmd, B_TRUE) : 3113 ql_cthdr_endian(sp->pkt->pkt_resp_acc, 3114 sp->pkt->pkt_resp, B_TRUE); 3115 } 3116 3117 /* Set completed status. */ 3118 sp->flags |= SRB_ISP_COMPLETED; 3119 3120 /* Place command on done queue. */ 3121 ql_add_link_b(done_q, &sp->cmd); 3122 3123 } 3124 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 3125 } 3126 3127 /* 3128 * ql_report_id_entry 3129 * Processes received Name/Management/CT Pass-Through entry. 3130 * 3131 * Input: 3132 * ha: adapter state pointer. 3133 * pkt: entry pointer. 3134 * done_q: done queue pointer. 3135 * set_flags: task daemon flags to set. 3136 * reset_flags: task daemon flags to reset. 3137 * 3138 * Context: 3139 * Interrupt or Kernel context, no mailbox commands allowed. 
3140 */ 3141 /* ARGSUSED */ 3142 static void 3143 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt, 3144 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 3145 { 3146 ql_adapter_state_t *vha; 3147 3148 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 3149 3150 EL(ha, "format=%d, vp=%d, status=%d\n", 3151 pkt->format, pkt->vp_index, pkt->status); 3152 3153 if (pkt->format == 1) { 3154 /* Locate port state structure. */ 3155 for (vha = ha; vha != NULL; vha = vha->vp_next) { 3156 if (vha->vp_index == pkt->vp_index) { 3157 break; 3158 } 3159 } 3160 if (vha != NULL && vha->vp_index != 0 && 3161 (pkt->status == CS_COMPLETE || 3162 pkt->status == CS_PORT_ID_CHANGE)) { 3163 *set_flags |= LOOP_RESYNC_NEEDED; 3164 *reset_flags &= ~LOOP_RESYNC_NEEDED; 3165 vha->loop_down_timer = LOOP_DOWN_TIMER_OFF; 3166 TASK_DAEMON_LOCK(ha); 3167 vha->task_daemon_flags |= LOOP_RESYNC_NEEDED; 3168 vha->task_daemon_flags &= ~LOOP_DOWN; 3169 TASK_DAEMON_UNLOCK(ha); 3170 } 3171 } 3172 3173 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 3174 } 3175 3176 /* 3177 * ql_els_entry 3178 * Processes received ELS Pass-Through entry. 3179 * 3180 * Input: 3181 * ha: adapter state pointer. 3182 * pkt23: entry pointer. 3183 * done_q: done queue pointer. 3184 * set_flags: task daemon flags to set. 3185 * reset_flags: task daemon flags to reset. 3186 * 3187 * Context: 3188 * Interrupt or Kernel context, no mailbox commands allowed. 3189 */ 3190 /* ARGSUSED */ 3191 static void 3192 ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp, 3193 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 3194 { 3195 ql_tgt_t *tq; 3196 port_id_t d_id, s_id; 3197 ql_srb_t *srb; 3198 uint32_t index, resp_identifier; 3199 3200 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 3201 3202 /* Validate the response entry handle. 
*/ 3203 resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle); 3204 index = resp_identifier & OSC_INDEX_MASK; 3205 if (index < MAX_OUTSTANDING_COMMANDS) { 3206 /* the index seems reasonable */ 3207 srb = ha->outstanding_cmds[index]; 3208 if (srb != NULL) { 3209 if (srb->handle == resp_identifier) { 3210 /* Neo, you're the one... */ 3211 ha->outstanding_cmds[index] = NULL; 3212 srb->handle = 0; 3213 srb->flags &= ~SRB_IN_TOKEN_ARRAY; 3214 } else { 3215 EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n", 3216 resp_identifier, srb->handle); 3217 srb = NULL; 3218 ql_signal_abort(ha, set_flags); 3219 } 3220 } else { 3221 srb = ql_verify_preprocessed_cmd(ha, 3222 (uint32_t *)&rsp->handle, set_flags, reset_flags); 3223 } 3224 } else { 3225 EL(ha, "osc index out of range, index=%xh, handle=%xh\n", 3226 index, resp_identifier); 3227 ql_signal_abort(ha, set_flags); 3228 } 3229 3230 if (srb != NULL) { 3231 if (!(srb->flags & SRB_ELS_PKT)) { 3232 EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed", 3233 srb->flags); 3234 *set_flags |= ISP_ABORT_NEEDED; 3235 return; 3236 } 3237 3238 (void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0, 3239 DDI_DMA_SYNC_FORKERNEL); 3240 3241 /* Set ISP completion status */ 3242 srb->pkt->pkt_reason = ddi_get16( 3243 ha->hba_buf.acc_handle, &rsp->comp_status); 3244 3245 if (srb->pkt->pkt_reason != CS_COMPLETE) { 3246 la_els_rjt_t rjt; 3247 EL(ha, "status err=%xh\n", srb->pkt->pkt_reason); 3248 3249 if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) { 3250 EL(ha, "e1=%xh e2=%xh\n", 3251 rsp->error_subcode1, rsp->error_subcode2); 3252 } 3253 3254 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR; 3255 3256 /* Build RJT in the response. 
*/ 3257 rjt.ls_code.ls_code = LA_ELS_RJT; 3258 rjt.reason = FC_REASON_NO_CONNECTION; 3259 3260 ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt, 3261 (uint8_t *)srb->pkt->pkt_resp, 3262 sizeof (rjt), DDI_DEV_AUTOINCR); 3263 3264 srb->pkt->pkt_state = FC_PKT_TRAN_ERROR; 3265 srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION; 3266 } 3267 3268 if (srb->pkt->pkt_reason == CS_COMPLETE) { 3269 uint8_t opcode; 3270 uint16_t loop_id; 3271 3272 /* Indicate ISP completion */ 3273 srb->flags |= SRB_ISP_COMPLETED; 3274 3275 loop_id = ddi_get16(ha->hba_buf.acc_handle, 3276 &rsp->n_port_hdl); 3277 3278 if (ha->topology & QL_N_PORT) { 3279 /* create a target Q if there isn't one */ 3280 tq = ql_loop_id_to_queue(ha, loop_id); 3281 if (tq == NULL) { 3282 d_id.b.al_pa = rsp->d_id_7_0; 3283 d_id.b.area = rsp->d_id_15_8; 3284 d_id.b.domain = rsp->d_id_23_16; 3285 /* Acquire adapter state lock. */ 3286 ADAPTER_STATE_LOCK(ha); 3287 3288 tq = ql_dev_init(ha, d_id, loop_id); 3289 EL(ha, " tq = %x\n", tq); 3290 3291 ADAPTER_STATE_UNLOCK(ha); 3292 } 3293 3294 /* on plogi success assume the chosen s_id */ 3295 opcode = ddi_get8(ha->hba_buf.acc_handle, 3296 &rsp->els_cmd_opcode); 3297 3298 EL(ha, "els_cmd_opcode=%x srb->pkt=%x\n", 3299 opcode, srb->pkt); 3300 3301 if (opcode == LA_ELS_PLOGI) { 3302 s_id.b.al_pa = rsp->s_id_7_0; 3303 s_id.b.area = rsp->s_id_15_8; 3304 s_id.b.domain = rsp->s_id_23_16; 3305 3306 ha->d_id.b24 = s_id.b24; 3307 EL(ha, "Set port's source ID %xh\n", 3308 ha->d_id.b24); 3309 } 3310 } 3311 ql_isp_els_handle_rsp_endian(ha, srb); 3312 3313 if (ha != srb->ha) { 3314 EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha); 3315 } 3316 3317 if (tq != NULL) { 3318 tq->logout_sent = 0; 3319 tq->flags &= ~TQF_NEED_AUTHENTICATION; 3320 3321 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3322 tq->flags |= TQF_IIDMA_NEEDED; 3323 } 3324 srb->pkt->pkt_state = FC_PKT_SUCCESS; 3325 } 3326 } 3327 /* invoke the callback */ 3328 ql_awaken_task_daemon(ha, srb, 0, 0); 3329 } 3330 QL_PRINT_3(CE_CONT, "(%d): 
done\n", ha->instance); 3331 } 3332 3333 /* 3334 * ql_signal_abort 3335 * Signal to the task daemon that a condition warranting an 3336 * isp reset has been detected. 3337 * 3338 * Input: 3339 * ha: adapter state pointer. 3340 * set_flags: task daemon flags to set. 3341 * 3342 * Context: 3343 * Interrupt or Kernel context, no mailbox commands allowed. 3344 */ 3345 static void 3346 ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags) 3347 { 3348 if (!CFG_IST(ha, CFG_CTRL_8021) && 3349 !(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) { 3350 *set_flags |= ISP_ABORT_NEEDED; 3351 } 3352 } 3353