/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * qlt - COMSTAR target-mode driver for QLogic 24xx/25xx Fibre Channel
 * HBAs.  This portion of the file covers module load/unload, DDI
 * attach/detach, interrupt setup (MSI-X/MSI/fixed), and the FCT
 * port-provider plumbing (port start/stop/online).
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/stat.h>
#include <sys/pci.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/file.h>
#include <sys/cred.h>
#include <sys/byteorder.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>

#include <stmf_defines.h>
#include <fct_defines.h>
#include <stmf.h>
#include <portif.h>
#include <fct.h>
#include <qlt.h>
#include <qlt_dma.h>
#include <qlt_ioctl.h>
#include <stmf_ioctl.h>

/* Local prototypes */
static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
    int reset_only);
static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
    uint32_t word_count, uint32_t risc_addr);
static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
    uint32_t dma_size);
void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
static fct_status_t qlt_initialize_adapter(fct_local_port_t *port);
static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
    stmf_state_change_info_t *ssci);
static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
    uint8_t *rsp);
static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
/*
 * NOTE(review): the next two prototypes duplicate declarations above
 * (qlt_reset_chip_and_download_fw, qlt_load_risc_ram).  Harmless, but
 * the duplicates could be removed.
 */
static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
    int reset_only);
static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
    uint32_t word_count, uint32_t risc_addr);
static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
fct_status_t qlt_port_start(caddr_t arg);
fct_status_t qlt_port_stop(caddr_t arg);
fct_status_t qlt_port_online(qlt_state_t *qlt);
fct_status_t qlt_port_offline(qlt_state_t *qlt);
static fct_status_t qlt_get_link_info(fct_local_port_t *port,
    fct_link_info_t *li);
static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
static fct_status_t qlt_do_flogi(struct fct_local_port *port,
    fct_flogi_xchg_t *fx);
void qlt_handle_atio_queue_update(qlt_state_t *qlt);
void qlt_handle_resp_queue_update(qlt_state_t *qlt);
fct_status_t qlt_register_remote_port(fct_local_port_t *port,
    fct_remote_port_t *rp, fct_cmd_t *login);
fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
    fct_remote_port_t *rp);
fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
    fct_cmd_t *cmd, int terminate);
/* NOTE(review): duplicate declaration of qlt_handle_inot (also above). */
static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
fct_status_t qlt_abort_cmd(struct fct_local_port *port,
    fct_cmd_t *cmd, uint32_t flags);
fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
    stmf_data_buf_t *dbuf, uint32_t ioflags);
fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
static void qlt_release_intr(qlt_state_t *qlt);
static int qlt_setup_interrupts(qlt_state_t *qlt);
static void qlt_destroy_mutex(qlt_state_t *qlt);

static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
    uint32_t words);
static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
    caddr_t buf, int size_left);
static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
    caddr_t buf, int size_left);
static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
    int count, int size_left);
static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval);
static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);

/*
 * Set the bit for ELS opcode 'els' in the 32-byte (256-bit) bitmap 'bmp'
 * that is passed to the firmware (see qlt_port_online).  The "& 0x1F"
 * clamps the byte index to the 32-byte bitmap.
 */
#define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] |= \
	((uint8_t)1) << ((els) & 7)

/* Tunable: MSI-X is only attempted when this is nonzero (see below). */
int qlt_enable_msix = 0;

/* Array to quickly calculate next free buf index to use */
static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };

/* Character-device entry points (only open/close/ioctl are supported). */
static struct cb_ops qlt_cb_ops = {
	qlt_open,		/* cb_open */
	qlt_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	qlt_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_str */
	D_MP | D_NEW		/* cb_flag */
};

/* Device operations vector. */
static struct dev_ops qlt_ops = {
	DEVO_REV,
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	qlt_attach,		/* devo_attach */
	qlt_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&qlt_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power		/* devo_power */
};

#define	QLT_NAME "COMSTAR QLT"
#define	QLT_VERSION "1.0"

static struct modldrv modldrv = {
	&mod_driverops,
	QLT_NAME,
	&qlt_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Soft-state anchor for all qlt instances (one qlt_state_t per instance). */
void *qlt_state = NULL;
kmutex_t qlt_global_lock;
/* Nonzero while any port is in use; blocks _fini (see _fini below). */
static uint32_t qlt_loaded_counter = 0;

/*
 * Printable PCI/PCI-X bus-speed strings, indexed by the 4-bit speed field
 * read from REG_CTRL_STATUS in qlt_attach (24xx/2422 only).
 */
static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
			"-X Mode 1 133", "--Invalid--",
			"-X Mode 2 66", "-X Mode 2 100",
			"-X Mode 2 133", " 66" };

/* Always use 64 bit DMA. */
static ddi_dma_attr_t qlt_queue_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};

/* qlogic logging */
int enable_extended_logging = 0;

static char qlt_provider_name[] = "qlt";
static struct stmf_port_provider *qlt_pp;

/*
 * _init - module load entry point.
 *
 * Initializes the soft-state anchor, registers qlt as an STMF port
 * provider, then installs the module.  Each step is unwound on failure
 * of a later step.  Returns 0 or an errno-style value from
 * ddi_soft_state_init/mod_install (EIO if provider registration fails).
 */
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
	if (ret == 0) {
		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
		/*
		 * NOTE(review): the stmf_alloc() result is dereferenced
		 * without a NULL check below.  Presumably stmf_alloc with
		 * flags 0 sleeps and cannot fail -- confirm against the
		 * stmf framework before relying on it.
		 */
		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
		qlt_pp->pp_portif_rev = PORTIF_REV_1;
		qlt_pp->pp_name = qlt_provider_name;
		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
			stmf_free(qlt_pp);
			mutex_destroy(&qlt_global_lock);
			ddi_soft_state_fini(&qlt_state);
			return (EIO);
		}
		ret = mod_install(&modlinkage);
		if (ret != 0) {
			(void) stmf_deregister_port_provider(qlt_pp);
			stmf_free(qlt_pp);
			mutex_destroy(&qlt_global_lock);
			ddi_soft_state_fini(&qlt_state);
		}
	}
	return (ret);
}

/*
 * _fini - module unload entry point.
 *
 * Refuses to unload (EBUSY) while any instance is still counted in
 * qlt_loaded_counter; otherwise removes the module and tears down the
 * provider registration and global state.
 */
int
_fini(void)
{
	int ret;

	if (qlt_loaded_counter)
		return (EBUSY);
	ret = mod_remove(&modlinkage);
	if (ret == 0) {
		(void) stmf_deregister_port_provider(qlt_pp);
		stmf_free(qlt_pp);
		mutex_destroy(&qlt_global_lock);
		ddi_soft_state_fini(&qlt_state);
	}
	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Read integer property 'prop' from this instance's dev_info node
 * (qlt.conf or the device tree), returning 'defval' if it is not set.
 */
int
qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
{
	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
}

/*
 * qlt_attach - DDI attach entry point.
 *
 * For DDI_ATTACH only (no suspend/resume support): allocates per-instance
 * soft state and NVRAM buffer, maps PCI config space and BAR registers,
 * validates the device ID (2422/2432/2522/2532), reads NVRAM, allocates
 * and binds the single contiguous DMA area holding all four queues,
 * sets up interrupts, creates the minor node, applies optional
 * pci-max-read-request / pcie-max-payload-size tunables from qlt.conf,
 * and finally registers the port with FCT via qlt_port_start().
 *
 * Failure unwinding uses the cascading attach_fail_* labels: each label
 * undoes the steps completed before its corresponding failure point and
 * falls through to the labels below it (reverse order of acquisition).
 */
static int
qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	qlt_state_t	*qlt;
	ddi_device_acc_attr_t	dev_acc_attr;
	uint16_t	did;		/* PCI device id */
	uint16_t	val;		/* encoded tunable value */
	uint16_t	mr;		/* scratch config-space register */
	size_t		discard;
	uint_t		ncookies;
	int		max_read_size;
	int		max_payload_size;
	fct_status_t	ret;

	/* No support for suspend resume yet */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		goto attach_fail_1;
	}
	qlt->instance = instance;
	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
	qlt->dip = dip;
	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
		goto attach_fail_2;
	}
	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
	if ((did != 0x2422) && (did != 0x2432) &&
	    (did != 0x2522) && (did != 0x2532)) {
		/* NOTE(review): "unknwon" typo in this message. */
		cmn_err(CE_WARN, "qlt(%d): unknwon devid(%x), failing attach",
		    instance, did);
		goto attach_fail_4;
	}
	/* 25xx family (0x25xx device ids) supports 8G and other extras. */
	if ((did & 0xFF00) == 0x2500)
		qlt->qlt_25xx_chip = 1;

	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	/* Map 0x100 bytes of register set 2 (the chip register window). */
	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
		goto attach_fail_4;
	}
	if (did == 0x2422) {
		/* 2422 is PCI/PCI-X; report the negotiated bus mode/speed. */
		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
		pci_bits >>= 8;
		pci_bits &= 0xf;
		if ((pci_bits == 3) || (pci_bits == 7)) {
			cmn_err(CE_NOTE,
			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
			    instance, pci_speeds[pci_bits], pci_bits);
		} else {
			cmn_err(CE_WARN,
			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
			    "(Invalid)", ((pci_bits == 0) ||
			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
			    "32 bit slot ") : "", pci_bits);
		}
	}
	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
		    (unsigned long long)ret);
		goto attach_fail_5;
	}

	/*
	 * One DMA allocation backs all queues; the *_OFFSET constants
	 * carve it up below.  The attributes require a single cookie.
	 */
	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
		goto attach_fail_5;
	}
	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
	    DDI_SUCCESS) {
		goto attach_fail_6;
	}
	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
		goto attach_fail_7;
	}
	/* The firmware needs one physically contiguous region. */
	if (ncookies != 1)
		goto attach_fail_8;
	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;

	/* mutex are inited in this function */
	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
		goto attach_fail_8;

	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
	    "qlt%d", instance);
	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
	    "%s,0", qlt->qlt_minor_name);

	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
		goto attach_fail_9;
	}

	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);

	/* Setup PCI cfg space registers */
	/*
	 * Sentinel 11 (not a legal size) means "property absent --
	 * leave the hardware default alone".
	 */
	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
	if (max_read_size == 11)
		goto over_max_read_xfer_setting;
	if (did == 0x2422) {
		/* PCI-X: max memory read byte count, cfg reg 0x4E bits 3:2. */
		if (max_read_size == 512)
			val = 0;
		else if (max_read_size == 1024)
			val = 1;
		else if (max_read_size == 2048)
			val = 2;
		else if (max_read_size == 4096)
			val = 3;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pci-max-read-request in qlt.conf. Valid values "
			    "for this HBA are 512/1024/2048/4096", instance);
			goto over_max_read_xfer_setting;
		}
		mr = PCICFG_RD16(qlt, 0x4E);
		mr &= 0xfff3;
		mr |= (val << 2);
		PCICFG_WR16(qlt, 0x4E, mr);
	} else if ((did == 0x2432) || (did == 0x2532)) {
		/* PCIe: max read request size, cfg reg 0x54 bits 14:12. */
		if (max_read_size == 128)
			val = 0;
		else if (max_read_size == 256)
			val = 1;
		else if (max_read_size == 512)
			val = 2;
		else if (max_read_size == 1024)
			val = 3;
		else if (max_read_size == 2048)
			val = 4;
		else if (max_read_size == 4096)
			val = 5;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pci-max-read-request in qlt.conf. Valid values "
			    "for this HBA are 128/256/512/1024/2048/4096",
			    instance);
			goto over_max_read_xfer_setting;
		}
		mr = PCICFG_RD16(qlt, 0x54);
		mr &= 0x8fff;
		mr |= (val << 12);
		PCICFG_WR16(qlt, 0x54, mr);
	} else {
		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
		    "pci-max-read-request for this device (%x)",
		    instance, did);
	}
over_max_read_xfer_setting:;

	/* Same sentinel scheme as above: 11 == property not configured. */
	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
	if (max_payload_size == 11)
		goto over_max_payload_setting;
	if ((did == 0x2432) || (did == 0x2532)) {
		/* PCIe: max payload size, cfg reg 0x54 bits 7:5. */
		if (max_payload_size == 128)
			val = 0;
		else if (max_payload_size == 256)
			val = 1;
		else if (max_payload_size == 512)
			val = 2;
		else if (max_payload_size == 1024)
			val = 3;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pcie-max-payload-size in qlt.conf. Valid values "
			    "for this HBA are 128/256/512/1024",
			    instance);
			goto over_max_payload_setting;
		}
		mr = PCICFG_RD16(qlt, 0x54);
		mr &= 0xff1f;
		mr |= (val << 5);
		PCICFG_WR16(qlt, 0x54, mr);
	} else {
		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
		    "pcie-max-payload-size for this device (%x)",
		    instance, did);
	}

over_max_payload_setting:;

	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
		goto attach_fail_10;

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* Teardown cascade: each label falls through to the next. */
attach_fail_10:;
	mutex_destroy(&qlt->qlt_ioctl_lock);
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
attach_fail_9:;
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
attach_fail_8:;
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
attach_fail_7:;
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
attach_fail_6:;
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
attach_fail_5:;
	ddi_regs_map_free(&qlt->regs_acc_handle);
attach_fail_4:;
	pci_config_teardown(&qlt->pcicfg_acc_handle);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
attach_fail_2:;
attach_fail_1:;
	ddi_soft_state_free(qlt_state, instance);
	return (DDI_FAILURE);
}

#define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83

/*
 * qlt_detach - DDI detach entry point.
 *
 * Refuses to detach while firmware is loaded (fw_code01 set) or while
 * the port is not quiesced offline with its state change acked; then
 * deregisters the port and releases resources in the reverse of the
 * order qlt_attach acquired them.
 */
/* ARGSUSED */
static int
qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	qlt_state_t *qlt;

	int instance;

	instance = ddi_get_instance(dip);
	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		return (DDI_FAILURE);
	}

	if (qlt->fw_code01) {
		return (DDI_FAILURE);
	}

	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
	    qlt->qlt_state_not_acked) {
		return (DDI_FAILURE);
	}
	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
		return (DDI_FAILURE);
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
	ddi_regs_map_free(&qlt->regs_acc_handle);
	pci_config_teardown(&qlt->pcicfg_acc_handle);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	ddi_soft_state_free(qlt_state, instance);

	return (DDI_SUCCESS);
}

/*
 * Enable all allocated interrupt vectors, using block enable when the
 * implementation supports it (required for MSI-X per DDI).
 */
static void
qlt_enable_intr(qlt_state_t *qlt)
{
	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
	} else {
		int i;
		for (i = 0; i < qlt->intr_cnt; i++)
			(void) ddi_intr_enable(qlt->htable[i]);
	}
}

/* Mirror of qlt_enable_intr: disable all allocated vectors. */
static void
qlt_disable_intr(qlt_state_t *qlt)
{
	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
	} else {
		int i;
		for (i = 0; i < qlt->intr_cnt; i++)
			(void) ddi_intr_disable(qlt->htable[i]);
	}
}

/*
 * Remove handlers, free every interrupt vector and the handle table,
 * then reset all interrupt bookkeeping fields.  Safe to call when
 * htable is already NULL (it then only clears the counters).
 */
static void
qlt_release_intr(qlt_state_t *qlt)
{
	if (qlt->htable) {
		int i;
		for (i = 0; i < qlt->intr_cnt; i++) {
			(void) ddi_intr_remove_handler(qlt->htable[i]);
			(void) ddi_intr_free(qlt->htable[i]);
		}
		kmem_free(qlt->htable, qlt->intr_size);
	}
	qlt->htable = NULL;
	qlt->intr_pri = 0;
	qlt->intr_cnt = 0;
	qlt->intr_size = 0;
	qlt->intr_cap = 0;
}


/*
 * Initialize the interrupt-side mutexes at the priority returned by
 * ddi_intr_get_pri (must be called after intr_pri is known).
 */
static void
qlt_init_mutex(qlt_state_t *qlt)
{
	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
	    INT2PTR(qlt->intr_pri, void *));
	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
	    INT2PTR(qlt->intr_pri, void *));
	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
	    INT2PTR(qlt->intr_pri, void *));
	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
	    INT2PTR(qlt->intr_pri, void *));
}

static void
qlt_destroy_mutex(qlt_state_t *qlt)
{
	mutex_destroy(&qlt->req_lock);
	mutex_destroy(&qlt->preq_lock);
	mutex_destroy(&qlt->mbox_lock);
	mutex_destroy(&qlt->intr_lock);
}


/*
 * Try to set up MSI-X interrupts.  Allocates up to the supported vector
 * count (at least 2 are required), initializes the interrupt mutexes,
 * and attaches qlt_isr to each vector with the vector index as arg2.
 * Returns DDI_SUCCESS or DDI_FAILURE with everything released.
 */
static int
qlt_setup_msix(qlt_state_t *qlt)
{
	int	count, avail, actual;
	int	ret;
	int	itype = DDI_INTR_TYPE_MSIX;
	int	i;

	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	if (ret != DDI_SUCCESS || count == 0) {
		return (DDI_FAILURE);
	}
	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		return (DDI_FAILURE);
	}
	if (avail < count) {
		stmf_trace(qlt->qlt_port_alias,
		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
	}

	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
	/*
	 * NOTE(review): inum/behavior argument order here differs from
	 * qlt_setup_msi below (DDI_INTR_ALLOC_NORMAL appears in the inum
	 * slot).  This is benign only because DDI_INTR_ALLOC_NORMAL is 0;
	 * worth normalizing to the ddi_intr_alloc(9F) signature.
	 */
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
	/* we need at least 2 interrupt vectors */
	if (ret != DDI_SUCCESS || actual < 2) {
		ret = DDI_FAILURE;
		goto release_intr;
	}
	if (actual < count) {
		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msix: "
		    "requested: %d, received: %d\n",
		    count, actual);
	}

	qlt->intr_cnt = actual;
	ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
		    qlt, INT2PTR(i, void *));
		if (ret != DDI_SUCCESS)
			goto release_mutex;
	}

	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
	qlt->intr_flags |= QLT_INTR_MSIX;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(qlt->htable[i]);
free_mem:
	kmem_free(qlt->htable, qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}


/*
 * Try to set up MSI interrupts.  Same shape as qlt_setup_msix, but
 * deliberately requests only a single vector.
 */
static int
qlt_setup_msi(qlt_state_t *qlt)
{
	int	count, avail, actual;
	int	itype = DDI_INTR_TYPE_MSI;
	int	ret;
	int	i;

	/* get the # of interrupts */
	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	if (ret != DDI_SUCCESS || count == 0) {
		return (DDI_FAILURE);
	}
	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		return (DDI_FAILURE);
	}
	if (avail < count) {
		QLT_LOG(qlt->qlt_port_alias,
		    "qlt_setup_msi: nintrs=%d, avail=%d", count, avail);
	}
	/* MSI requires only 1 interrupt. */
	count = 1;

	/* allocate interrupt */
	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		ret = DDI_FAILURE;
		goto free_mem;
	}
	if (actual < count) {
		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msi: "
		    "requested: %d, received:%d",
		    count, actual);
	}
	qlt->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same.
	 */
	ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);

	/* add handler */
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
		    qlt, INT2PTR(i, void *));
		if (ret != DDI_SUCCESS)
			goto release_mutex;
	}

	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
	qlt->intr_flags |= QLT_INTR_MSI;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(qlt->htable[i]);
free_mem:
	kmem_free(qlt->htable, qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}

/*
 * Set up a legacy fixed (INTx) interrupt.  Exactly one vector is
 * expected for the fixed type.  This is the fallback when MSI-X/MSI
 * are unavailable or disabled (see qlt_setup_interrupts).
 */
static int
qlt_setup_fixed(qlt_state_t *qlt)
{
	int	count;
	int	actual;
	int	ret;
	int	itype = DDI_INTR_TYPE_FIXED;

	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	/* Fixed interrupts can only have one interrupt. */
	if (ret != DDI_SUCCESS || count != 1) {
		return (DDI_FAILURE);
	}

	qlt->intr_size = sizeof (ddi_intr_handle_t);
	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
	/*
	 * NOTE(review): same inum/behavior argument-order inconsistency
	 * as qlt_setup_msix (benign while DDI_INTR_ALLOC_NORMAL == 0).
	 */
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
	if (ret != DDI_SUCCESS || actual != 1) {
		ret = DDI_FAILURE;
		goto free_mem;
	}

	qlt->intr_cnt = actual;
	ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);
	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
	if (ret != DDI_SUCCESS)
		goto release_mutex;

	qlt->intr_flags |= QLT_INTR_FIXED;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	(void) ddi_intr_free(qlt->htable[0]);
free_mem:
	kmem_free(qlt->htable, qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}


/*
 * Pick and set up the best available interrupt type.  On sparc the
 * order is MSI-X (only if qlt_enable_msix) then MSI then fixed; on
 * other platforms only fixed is used (see comment below).
 */
static int
qlt_setup_interrupts(qlt_state_t *qlt)
{
#if defined(__sparc)
	int itypes = 0;
#endif

/*
 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198). So use
 * MSI for sparc only for now.
 */
#if defined(__sparc)
	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
		itypes = DDI_INTR_TYPE_FIXED;
	}

	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
	if (itypes & DDI_INTR_TYPE_MSI) {
		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
#endif
	return (qlt_setup_fixed(qlt));
}

/*
 * Filling the hba attributes
 */
void
qlt_populate_hba_fru_details(struct fct_local_port *port,
    struct fct_port_attrs *port_attrs)
{
	caddr_t	bufp;
	int	len;
	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;

	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
	    "QLogic Corp.");
	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
	    "%s", QLT_NAME);
	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
	    "%s", QLT_VERSION);
	port_attrs->serial_number[0] = '\0';
	port_attrs->hardware_version[0] = '\0';

	(void) snprintf(port_attrs->firmware_version,
	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
	    qlt->fw_minor, qlt->fw_subminor);

	/* Get FCode version */
	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		(void) snprintf(port_attrs->option_rom_version,
		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
		kmem_free(bufp, len);
		bufp = NULL;
	} else {
#ifdef __sparc
#define	FCHBA_OPTION_ROM_ERR_TEXT	"No Fcode found"
#else
#define	FCHBA_OPTION_ROM_ERR_TEXT	"N/A"
#endif
		(void) snprintf(port_attrs->option_rom_version,
		    FCHBA_OPTION_ROM_VERSION_LEN, "%s",
		    FCHBA_OPTION_ROM_ERR_TEXT);
	}
	/* NVRAM multi-byte fields are stored little-endian byte arrays. */
	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
	    qlt->nvram->subsystem_vendor_id[1] << 8;

	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
	    qlt->nvram->max_frame_length[0];

	port_attrs->supported_cos = 0x10000000;
	port_attrs->supported_speed = PORT_SPEED_1G |
	    PORT_SPEED_2G | PORT_SPEED_4G;
	if (qlt->qlt_25xx_chip)
		port_attrs->supported_speed |= PORT_SPEED_8G;

	(void) snprintf(port_attrs->model, FCHBA_MODEL_LEN, "%s",
	    qlt->nvram->model_name);
	(void) snprintf(port_attrs->model_description,
	    FCHBA_MODEL_DESCRIPTION_LEN, "%s", qlt->nvram->model_name);
}

/*
 * qlt_info - FCT port-info callback.
 *
 * Currently only FC_TGT_PORT_RLS (read link status) is implemented:
 * mailbox command 0x6d fetches the link error status block from the
 * firmware into a 156-byte DMA buffer, whose little-endian counters
 * are unpacked into the caller's fct_port_link_status_t.
 */
/* ARGSUSED */
fct_status_t
qlt_info(uint32_t cmd, fct_local_port_t *port,
    void *arg, uint8_t *buf, uint32_t *bufsizep)
{
	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
	mbox_cmd_t	*mcp;
	fct_status_t	ret = FCT_SUCCESS;
	uint8_t		*p;
	fct_port_link_status_t	*link_status;

	switch (cmd) {
	case FC_TGT_PORT_RLS:
		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
			ret = FCT_FAILURE;
			break;
		}
		/* send mailbox command to get link status */
		mcp = qlt_alloc_mailbox_command(qlt, 156);
		if (mcp == NULL) {
			ret = FCT_ALLOC_FAILURE;
			break;
		}

		/* GET LINK STATUS count */
		/* 0x6d: get-link-status mailbox opcode; mbox8 = word count */
		mcp->to_fw[0] = 0x6d;
		mcp->to_fw[8] = 156/4;
		mcp->to_fw_mask |= BIT_1 | BIT_8;
		mcp->from_fw_mask |= BIT_1 | BIT_2;

		ret = qlt_mailbox_command(qlt, mcp);
		if (ret != QLT_SUCCESS) {
			qlt_free_mailbox_command(qlt, mcp);
			break;
		}
		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);

		p = mcp->dbuf->db_sglist[0].seg_addr;
		link_status = (fct_port_link_status_t *)buf;
		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
		link_status->PrimitiveSeqProtocolErrorCount =
		    LE_32(*((uint32_t *)(p + 12)));
		link_status->InvalidTransmissionWordCount =
		    LE_32(*((uint32_t *)(p + 16)));
		link_status->InvalidCRCCount =
		    LE_32(*((uint32_t *)(p + 20)));

		qlt_free_mailbox_command(qlt, mcp);
		break;
	default:
		ret = FCT_FAILURE;
		break;
	}
	return (ret);
}

/*
 * qlt_port_start - allocate and register this instance's FCT local port.
 *
 * Sets up the dbuf store, fills in the fct_local_port_t callback vector
 * and limits from NVRAM, and registers the port with the FCT framework.
 * 'arg' is really the qlt_state_t * (cast through caddr_t for the
 * framework).  Returns QLT_SUCCESS or QLT_FAILURE (FCT_FAILURE if the
 * dmem cache cannot be initialized).
 */
fct_status_t
qlt_port_start(caddr_t arg)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;
	fct_local_port_t *port;
	fct_dbuf_store_t *fds;

	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
		return (FCT_FAILURE);
	}
	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
	if (port == NULL) {
		goto qlt_pstart_fail_1;
	}
	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
	if (fds == NULL) {
		goto qlt_pstart_fail_2;
	}
	qlt->qlt_port = port;
	fds->fds_alloc_data_buf = qlt_dmem_alloc;
	fds->fds_free_data_buf = qlt_dmem_free;
	fds->fds_fca_private = (void *)qlt;
	/*
	 * Since we keep everything in the state struct and dont allocate any
	 * port private area, just use that pointer to point to the
	 * state struct.
	 */
	port->port_fca_private = qlt;
	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
	port->port_default_alias = qlt->qlt_port_alias;
	port->port_pp = qlt_pp;
	port->port_fds = fds;
	port->port_max_logins = QLT_MAX_LOGINS;
	port->port_max_xchges = QLT_MAX_XCHGES;
	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
	port->port_get_link_info = qlt_get_link_info;
	port->port_register_remote_port = qlt_register_remote_port;
	port->port_deregister_remote_port = qlt_deregister_remote_port;
	port->port_send_cmd = qlt_send_cmd;
	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
	port->port_send_cmd_response = qlt_send_cmd_response;
	port->port_abort_cmd = qlt_abort_cmd;
	port->port_ctl = qlt_ctl;
	port->port_flogi_xchg = qlt_do_flogi;
	port->port_populate_hba_details = qlt_populate_hba_fru_details;
	port->port_info = qlt_info;

	if (fct_register_local_port(port) != FCT_SUCCESS) {
		goto qlt_pstart_fail_2_5;
	}

	return (QLT_SUCCESS);

	/*
	 * NOTE(review): qlt_pstart_fail_3 is unreachable dead code --
	 * nothing jumps to it and the success path returns above.
	 */
qlt_pstart_fail_3:
	(void) fct_deregister_local_port(port);
qlt_pstart_fail_2_5:
	fct_free(fds);
qlt_pstart_fail_2:
	fct_free(port);
	qlt->qlt_port = NULL;
qlt_pstart_fail_1:
	qlt_dmem_fini(qlt);
	return (QLT_FAILURE);
}

/*
 * qlt_port_stop - deregister the FCT local port and free what
 * qlt_port_start allocated.  Fails (QLT_FAILURE) if the framework
 * refuses the deregistration.
 */
fct_status_t
qlt_port_stop(caddr_t arg)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;

	if (fct_deregister_local_port(qlt->qlt_port) != FCT_SUCCESS)
		return (QLT_FAILURE);
	fct_free(qlt->qlt_port->port_fds);
	fct_free(qlt->qlt_port);
	qlt->qlt_port = NULL;
	qlt_dmem_fini(qlt);
	return (QLT_SUCCESS);
}

/*
 * Called by framework to init the HBA.
 * Can be called in the middle of I/O. (Why ??)
 * Should make sure sane state both before and after the initialization
 */
fct_status_t
qlt_port_online(qlt_state_t *qlt)
{
	uint64_t	da;
	int		instance;
	fct_status_t	ret;
	uint16_t	rcount;
	caddr_t		icb;
	mbox_cmd_t	*mcp;
	uint8_t		*elsbmp;

	instance = ddi_get_instance(qlt->dip);

	/* XXX Make sure a sane state */

	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
		return (ret);
	}

	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);

	/* Get resource count */
	/* Raw mailbox cmd 0x42: firmware resource counts; count in mbox3. */
	REG_WR16(qlt, REG_MBOX(0), 0x42);
	ret = qlt_raw_mailbox_command(qlt);
	rcount = REG_RD16(qlt, REG_MBOX(3));
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	if (ret != QLT_SUCCESS)
		return (ret);

	/* Enable PUREX */
	/* Mailbox cmd 0x38 with mbox1 bit set: pass received ELS to host. */
	REG_WR16(qlt, REG_MBOX(0), 0x38);
	REG_WR16(qlt, REG_MBOX(1), 0x0400);
	REG_WR16(qlt, REG_MBOX(2), 0x0);
	REG_WR16(qlt, REG_MBOX(3), 0x0);
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	if (ret != QLT_SUCCESS) {
		cmn_err(CE_NOTE, "Enable PUREX failed");
		return (ret);
	}

	/* Pass ELS bitmap to fw */
	/*
	 * Mailbox cmd 0x59: give firmware the 32-byte bitmap (built with
	 * SETELSBIT below) of ELS opcodes to deliver via PUREX.  The
	 * 64-bit DMA address is split across mbox 3/2/7/6 (low to high
	 * 16-bit words).
	 */
	REG_WR16(qlt, REG_MBOX(0), 0x59);
	REG_WR16(qlt, REG_MBOX(1), 0x0500);
	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
	bzero(elsbmp, 32);
	da = qlt->queue_mem_cookie.dmac_laddress;
	da += MBOX_DMA_MEM_OFFSET;
	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
	SETELSBIT(elsbmp, ELS_OP_PLOGI);
	SETELSBIT(elsbmp, ELS_OP_LOGO);
	SETELSBIT(elsbmp, ELS_OP_ABTX);
	SETELSBIT(elsbmp, ELS_OP_ECHO);
	SETELSBIT(elsbmp, ELS_OP_PRLI);
	SETELSBIT(elsbmp, ELS_OP_PRLO);
	SETELSBIT(elsbmp, ELS_OP_SCN);
	SETELSBIT(elsbmp, ELS_OP_TPRLO);
	SETELSBIT(elsbmp, ELS_OP_PDISC);
	SETELSBIT(elsbmp, ELS_OP_ADISC);
	SETELSBIT(elsbmp, ELS_OP_RSCN);
	SETELSBIT(elsbmp, ELS_OP_RNID);
	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
	    DDI_DMA_SYNC_FORDEV);
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	if (ret != QLT_SUCCESS) {
		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
		    "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
		    elsbmp[1]);
		return (ret);
	}

	/* Init queue pointers */
	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;

	/*
	 * XXX support for tunables. Also should we cache icb ?
1136 */ 1137 mcp = qlt_alloc_mailbox_command(qlt, 0x80); 1138 if (mcp == NULL) { 1139 return (STMF_ALLOC_FAILURE); 1140 } 1141 icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr; 1142 bzero(icb, 0x80); 1143 da = qlt->queue_mem_cookie.dmac_laddress; 1144 DMEM_WR16(qlt, icb, 1); /* Version */ 1145 DMEM_WR16(qlt, icb+4, 2112); /* Max frame length */ 1146 DMEM_WR16(qlt, icb+6, 16); /* Execution throttle */ 1147 DMEM_WR16(qlt, icb+8, rcount); /* Xchg count */ 1148 DMEM_WR16(qlt, icb+0x0a, 0x00); /* Hard address (not used) */ 1149 bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8); 1150 bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8); 1151 DMEM_WR16(qlt, icb+0x20, 3); /* Login retry count */ 1152 DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES); 1153 DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES); 1154 DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */ 1155 DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES); 1156 DMEM_WR64(qlt, icb+0x2c, da+REQUEST_QUEUE_OFFSET); 1157 DMEM_WR64(qlt, icb+0x34, da+RESPONSE_QUEUE_OFFSET); 1158 DMEM_WR64(qlt, icb+0x3c, da+PRIORITY_QUEUE_OFFSET); 1159 DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES); 1160 DMEM_WR64(qlt, icb+0x50, da+ATIO_QUEUE_OFFSET); 1161 DMEM_WR16(qlt, icb+0x58, 2); /* Interrupt delay Timer */ 1162 DMEM_WR16(qlt, icb+0x5a, 4); /* Login timeout (secs) */ 1163 DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 | 1164 BIT_2 | BIT_1 | BIT_0); 1165 DMEM_WR32(qlt, icb+0x60, BIT_5); 1166 DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 | BIT_4); 1167 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV); 1168 mcp->to_fw[0] = 0x60; 1169 1170 /* 1171 * This is the 1st command adter adapter initialize which will 1172 * use interrupts and regular mailbox interface. 
1173 */ 1174 qlt->mbox_io_state = MBOX_STATE_READY; 1175 qlt_enable_intr(qlt); 1176 qlt->qlt_intr_enabled = 1; 1177 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR); 1178 /* Issue mailbox to firmware */ 1179 ret = qlt_mailbox_command(qlt, mcp); 1180 if (ret != QLT_SUCCESS) { 1181 cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x", 1182 instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS)); 1183 } 1184 1185 mcp->to_fw_mask = BIT_0; 1186 mcp->from_fw_mask = BIT_0 | BIT_1; 1187 mcp->to_fw[0] = 0x28; 1188 ret = qlt_mailbox_command(qlt, mcp); 1189 if (ret != QLT_SUCCESS) { 1190 cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance, 1191 (long long)ret); 1192 } 1193 1194 qlt_free_mailbox_command(qlt, mcp); 1195 if (ret != QLT_SUCCESS) 1196 return (ret); 1197 return (FCT_SUCCESS); 1198 } 1199 1200 fct_status_t 1201 qlt_port_offline(qlt_state_t *qlt) 1202 { 1203 int retries; 1204 1205 mutex_enter(&qlt->mbox_lock); 1206 1207 if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) { 1208 mutex_exit(&qlt->mbox_lock); 1209 goto poff_mbox_done; 1210 } 1211 1212 /* Wait to grab the mailboxes */ 1213 for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY; 1214 retries++) { 1215 cv_wait(&qlt->mbox_cv, &qlt->mbox_lock); 1216 if ((retries > 5) || 1217 (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) { 1218 qlt->mbox_io_state = MBOX_STATE_UNKNOWN; 1219 mutex_exit(&qlt->mbox_lock); 1220 goto poff_mbox_done; 1221 } 1222 } 1223 qlt->mbox_io_state = MBOX_STATE_UNKNOWN; 1224 mutex_exit(&qlt->mbox_lock); 1225 poff_mbox_done:; 1226 qlt->intr_sneak_counter = 10; 1227 qlt_disable_intr(qlt); 1228 mutex_enter(&qlt->intr_lock); 1229 qlt->qlt_intr_enabled = 0; 1230 (void) qlt_reset_chip_and_download_fw(qlt, 1); 1231 drv_usecwait(20); 1232 qlt->intr_sneak_counter = 0; 1233 mutex_exit(&qlt->intr_lock); 1234 1235 return (FCT_SUCCESS); 1236 } 1237 1238 static fct_status_t 1239 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li) 1240 { 1241 qlt_state_t *qlt = (qlt_state_t 
*)port->port_fca_private; 1242 mbox_cmd_t *mcp; 1243 fct_status_t fc_ret; 1244 fct_status_t ret; 1245 clock_t et; 1246 1247 et = ddi_get_lbolt() + drv_usectohz(5000000); 1248 mcp = qlt_alloc_mailbox_command(qlt, 0); 1249 link_info_retry: 1250 mcp->to_fw[0] = 0x20; 1251 mcp->to_fw_mask |= BIT_0; 1252 mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7; 1253 /* Issue mailbox to firmware */ 1254 ret = qlt_mailbox_command(qlt, mcp); 1255 if (ret != QLT_SUCCESS) { 1256 if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) { 1257 /* Firmware is not ready */ 1258 if (ddi_get_lbolt() < et) { 1259 delay(drv_usectohz(50000)); 1260 goto link_info_retry; 1261 } 1262 } 1263 stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx " 1264 "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]); 1265 fc_ret = FCT_FAILURE; 1266 } else { 1267 li->portid = ((uint32_t)(mcp->from_fw[2])) | 1268 (((uint32_t)(mcp->from_fw[3])) << 16); 1269 1270 li->port_speed = qlt->link_speed; 1271 switch (mcp->from_fw[6]) { 1272 case 1: 1273 li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP; 1274 li->port_fca_flogi_done = 1; 1275 break; 1276 case 0: 1277 li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP; 1278 li->port_no_fct_flogi = 1; 1279 break; 1280 case 3: 1281 li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT; 1282 li->port_fca_flogi_done = 1; 1283 break; 1284 case 2: /*FALLTHROUGH*/ 1285 case 4: 1286 li->port_topology = PORT_TOPOLOGY_PT_TO_PT; 1287 li->port_fca_flogi_done = 1; 1288 break; 1289 default: 1290 li->port_topology = PORT_TOPOLOGY_UNKNOWN; 1291 QLT_LOG(qlt->qlt_port_alias, "Unknown link speed " 1292 "reported by fw %x", mcp->from_fw[6]); 1293 } 1294 qlt->cur_topology = li->port_topology; 1295 fc_ret = FCT_SUCCESS; 1296 } 1297 qlt_free_mailbox_command(qlt, mcp); 1298 1299 if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) { 1300 mcp = qlt_alloc_mailbox_command(qlt, 64); 1301 mcp->to_fw[0] = 0x64; 1302 mcp->to_fw[1] = 0x7FE; 1303 mcp->to_fw[10] = 0; 1304 
mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_10; 1305 fc_ret = qlt_mailbox_command(qlt, mcp); 1306 if (fc_ret != QLT_SUCCESS) { 1307 stmf_trace(qlt->qlt_port_alias, "Attempt to get port " 1308 "database for F_port failed, ret = %llx", fc_ret); 1309 } else { 1310 uint8_t *p; 1311 1312 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU); 1313 p = mcp->dbuf->db_sglist[0].seg_addr; 1314 bcopy(p + 0x18, li->port_rpwwn, 8); 1315 bcopy(p + 0x20, li->port_rnwwn, 8); 1316 } 1317 qlt_free_mailbox_command(qlt, mcp); 1318 } 1319 return (fc_ret); 1320 } 1321 1322 static int 1323 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp) 1324 { 1325 int instance; 1326 qlt_state_t *qlt; 1327 1328 if (otype != OTYP_CHR) { 1329 return (EINVAL); 1330 } 1331 1332 /* 1333 * Since this is for debugging only, only allow root to issue ioctl now 1334 */ 1335 if (drv_priv(credp)) { 1336 return (EPERM); 1337 } 1338 1339 instance = (int)getminor(*devp); 1340 qlt = ddi_get_soft_state(qlt_state, instance); 1341 if (qlt == NULL) { 1342 return (ENXIO); 1343 } 1344 1345 mutex_enter(&qlt->qlt_ioctl_lock); 1346 if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) { 1347 /* 1348 * It is already open for exclusive access. 1349 * So shut the door on this caller. 
1350 */ 1351 mutex_exit(&qlt->qlt_ioctl_lock); 1352 return (EBUSY); 1353 } 1354 1355 if (flag & FEXCL) { 1356 if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) { 1357 /* 1358 * Exclusive operation not possible 1359 * as it is already opened 1360 */ 1361 mutex_exit(&qlt->qlt_ioctl_lock); 1362 return (EBUSY); 1363 } 1364 qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL; 1365 } 1366 qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN; 1367 mutex_exit(&qlt->qlt_ioctl_lock); 1368 1369 return (0); 1370 } 1371 1372 /* ARGSUSED */ 1373 static int 1374 qlt_close(dev_t dev, int flag, int otype, cred_t *credp) 1375 { 1376 int instance; 1377 qlt_state_t *qlt; 1378 1379 if (otype != OTYP_CHR) { 1380 return (EINVAL); 1381 } 1382 1383 instance = (int)getminor(dev); 1384 qlt = ddi_get_soft_state(qlt_state, instance); 1385 if (qlt == NULL) { 1386 return (ENXIO); 1387 } 1388 1389 mutex_enter(&qlt->qlt_ioctl_lock); 1390 if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) { 1391 mutex_exit(&qlt->qlt_ioctl_lock); 1392 return (ENODEV); 1393 } 1394 1395 /* 1396 * It looks there's one hole here, maybe there could several concurrent 1397 * shareed open session, but we never check this case. 1398 * But it will not hurt too much, disregard it now. 1399 */ 1400 qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK; 1401 mutex_exit(&qlt->qlt_ioctl_lock); 1402 1403 return (0); 1404 } 1405 1406 /* 1407 * All of these ioctls are unstable interfaces which are meant to be used 1408 * in a controlled lab env. No formal testing will be (or needs to be) done 1409 * for these ioctls. Specially note that running with an additional 1410 * uploaded firmware is not supported and is provided here for test 1411 * purposes only. 
1412 */ 1413 /* ARGSUSED */ 1414 static int 1415 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 1416 cred_t *credp, int *rval) 1417 { 1418 qlt_state_t *qlt; 1419 int ret = 0; 1420 #ifdef _LITTLE_ENDIAN 1421 int i; 1422 #endif 1423 stmf_iocdata_t *iocd; 1424 void *ibuf = NULL; 1425 void *obuf = NULL; 1426 uint32_t *intp; 1427 qlt_fw_info_t *fwi; 1428 mbox_cmd_t *mcp; 1429 fct_status_t st; 1430 char info[80]; 1431 1432 if (drv_priv(credp) != 0) 1433 return (EPERM); 1434 1435 qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev)); 1436 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf); 1437 if (ret) 1438 return (ret); 1439 iocd->stmf_error = 0; 1440 1441 switch (cmd) { 1442 case QLT_IOCTL_FETCH_FWDUMP: 1443 if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) { 1444 ret = EINVAL; 1445 break; 1446 } 1447 mutex_enter(&qlt->qlt_ioctl_lock); 1448 if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) { 1449 mutex_exit(&qlt->qlt_ioctl_lock); 1450 ret = ENODATA; 1451 iocd->stmf_error = QLTIO_NO_DUMP; 1452 break; 1453 } 1454 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) { 1455 mutex_exit(&qlt->qlt_ioctl_lock); 1456 ret = EBUSY; 1457 iocd->stmf_error = QLTIO_DUMP_INPROGRESS; 1458 break; 1459 } 1460 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) { 1461 mutex_exit(&qlt->qlt_ioctl_lock); 1462 ret = EEXIST; 1463 iocd->stmf_error = QLTIO_ALREADY_FETCHED; 1464 break; 1465 } 1466 bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE); 1467 qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER; 1468 mutex_exit(&qlt->qlt_ioctl_lock); 1469 1470 break; 1471 1472 case QLT_IOCTL_TRIGGER_FWDUMP: 1473 if (qlt->qlt_state != FCT_STATE_ONLINE) { 1474 ret = EACCES; 1475 iocd->stmf_error = QLTIO_NOT_ONLINE; 1476 break; 1477 } 1478 (void) snprintf(info, 80, "qlt_ioctl: qlt-%p, " 1479 "user triggered FWDUMP with RFLAG_RESET", (void *)qlt); 1480 info[79] = 0; 1481 if (fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_USER_REQUEST | 1482 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, 
1483 info) != FCT_SUCCESS) { 1484 ret = EIO; 1485 } 1486 break; 1487 case QLT_IOCTL_UPLOAD_FW: 1488 if ((iocd->stmf_ibuf_size < 1024) || 1489 (iocd->stmf_ibuf_size & 3)) { 1490 ret = EINVAL; 1491 iocd->stmf_error = QLTIO_INVALID_FW_SIZE; 1492 break; 1493 } 1494 intp = (uint32_t *)ibuf; 1495 #ifdef _LITTLE_ENDIAN 1496 for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) { 1497 intp[i] = BSWAP_32(intp[i]); 1498 } 1499 #endif 1500 if (((intp[3] << 2) >= iocd->stmf_ibuf_size) || 1501 (((intp[intp[3] + 3] + intp[3]) << 2) != 1502 iocd->stmf_ibuf_size)) { 1503 ret = EINVAL; 1504 iocd->stmf_error = QLTIO_INVALID_FW_SIZE; 1505 break; 1506 } 1507 if ((qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) || 1508 (!qlt->qlt_25xx_chip && ((intp[8] & 3) == 0))) { 1509 ret = EACCES; 1510 iocd->stmf_error = QLTIO_INVALID_FW_TYPE; 1511 break; 1512 } 1513 1514 /* Everything looks ok, lets copy this firmware */ 1515 if (qlt->fw_code01) { 1516 kmem_free(qlt->fw_code01, (qlt->fw_length01 + 1517 qlt->fw_length02) << 2); 1518 qlt->fw_code01 = NULL; 1519 } else { 1520 atomic_add_32(&qlt_loaded_counter, 1); 1521 } 1522 qlt->fw_length01 = intp[3]; 1523 qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size, 1524 KM_SLEEP); 1525 bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size); 1526 qlt->fw_addr01 = intp[2]; 1527 qlt->fw_code02 = &qlt->fw_code01[intp[3]]; 1528 qlt->fw_addr02 = qlt->fw_code02[2]; 1529 qlt->fw_length02 = qlt->fw_code02[3]; 1530 break; 1531 1532 case QLT_IOCTL_CLEAR_FW: 1533 if (qlt->fw_code01) { 1534 kmem_free(qlt->fw_code01, (qlt->fw_length01 + 1535 qlt->fw_length02) << 2); 1536 qlt->fw_code01 = NULL; 1537 atomic_add_32(&qlt_loaded_counter, -1); 1538 } 1539 break; 1540 1541 case QLT_IOCTL_GET_FW_INFO: 1542 if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) { 1543 ret = EINVAL; 1544 break; 1545 } 1546 fwi = (qlt_fw_info_t *)obuf; 1547 if (qlt->qlt_stay_offline) { 1548 fwi->fwi_stay_offline = 1; 1549 } 1550 if (qlt->qlt_state == FCT_STATE_ONLINE) { 1551 fwi->fwi_port_active = 1; 
1552 } 1553 fwi->fwi_active_major = qlt->fw_major; 1554 fwi->fwi_active_minor = qlt->fw_minor; 1555 fwi->fwi_active_subminor = qlt->fw_subminor; 1556 fwi->fwi_active_attr = qlt->fw_attr; 1557 if (qlt->fw_code01) { 1558 fwi->fwi_fw_uploaded = 1; 1559 fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4]; 1560 fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5]; 1561 fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6]; 1562 fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7]; 1563 } 1564 if (qlt->qlt_25xx_chip) { 1565 fwi->fwi_default_major = (uint16_t)fw2500_code01[4]; 1566 fwi->fwi_default_minor = (uint16_t)fw2500_code01[5]; 1567 fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6]; 1568 fwi->fwi_default_attr = (uint16_t)fw2500_code01[7]; 1569 } else { 1570 fwi->fwi_default_major = (uint16_t)fw2400_code01[4]; 1571 fwi->fwi_default_minor = (uint16_t)fw2400_code01[5]; 1572 fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6]; 1573 fwi->fwi_default_attr = (uint16_t)fw2400_code01[7]; 1574 } 1575 break; 1576 1577 case QLT_IOCTL_STAY_OFFLINE: 1578 if (!iocd->stmf_ibuf_size) { 1579 ret = EINVAL; 1580 break; 1581 } 1582 if (*((char *)ibuf)) { 1583 qlt->qlt_stay_offline = 1; 1584 } else { 1585 qlt->qlt_stay_offline = 0; 1586 } 1587 break; 1588 1589 case QLT_IOCTL_MBOX: 1590 if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) || 1591 (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) { 1592 ret = EINVAL; 1593 break; 1594 } 1595 mcp = qlt_alloc_mailbox_command(qlt, 0); 1596 if (mcp == NULL) { 1597 ret = ENOMEM; 1598 break; 1599 } 1600 bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t)); 1601 st = qlt_mailbox_command(qlt, mcp); 1602 bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t)); 1603 qlt_free_mailbox_command(qlt, mcp); 1604 if (st != QLT_SUCCESS) { 1605 if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED) 1606 st = QLT_SUCCESS; 1607 } 1608 if (st != QLT_SUCCESS) { 1609 ret = EIO; 1610 switch (st) { 1611 case QLT_MBOX_NOT_INITIALIZED: 1612 iocd->stmf_error = 
QLTIO_MBOX_NOT_INITIALIZED; 1613 break; 1614 case QLT_MBOX_BUSY: 1615 iocd->stmf_error = QLTIO_CANT_GET_MBOXES; 1616 break; 1617 case QLT_MBOX_TIMEOUT: 1618 iocd->stmf_error = QLTIO_MBOX_TIMED_OUT; 1619 break; 1620 case QLT_MBOX_ABORTED: 1621 iocd->stmf_error = QLTIO_MBOX_ABORTED; 1622 break; 1623 } 1624 } 1625 break; 1626 1627 default: 1628 QLT_LOG(qlt->qlt_port_alias, "qlt_ioctl: ioctl-0x%02X", cmd); 1629 ret = ENOTTY; 1630 } 1631 1632 if (ret == 0) { 1633 ret = stmf_copyout_iocdata(data, mode, iocd, obuf); 1634 } else if (iocd->stmf_error) { 1635 (void) stmf_copyout_iocdata(data, mode, iocd, obuf); 1636 } 1637 if (obuf) { 1638 kmem_free(obuf, iocd->stmf_obuf_size); 1639 obuf = NULL; 1640 } 1641 if (ibuf) { 1642 kmem_free(ibuf, iocd->stmf_ibuf_size); 1643 ibuf = NULL; 1644 } 1645 kmem_free(iocd, sizeof (stmf_iocdata_t)); 1646 return (ret); 1647 } 1648 1649 static void 1650 qlt_ctl(struct fct_local_port *port, int cmd, void *arg) 1651 { 1652 stmf_change_status_t st; 1653 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg; 1654 qlt_state_t *qlt; 1655 1656 ASSERT((cmd == FCT_CMD_PORT_ONLINE) || 1657 (cmd == FCT_CMD_PORT_OFFLINE) || 1658 (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) || 1659 (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE)); 1660 1661 qlt = (qlt_state_t *)port->port_fca_private; 1662 st.st_completion_status = FCT_SUCCESS; 1663 st.st_additional_info = NULL; 1664 1665 switch (cmd) { 1666 case FCT_CMD_PORT_ONLINE: 1667 if (qlt->qlt_state == FCT_STATE_ONLINE) 1668 st.st_completion_status = STMF_ALREADY; 1669 else if (qlt->qlt_state != FCT_STATE_OFFLINE) 1670 st.st_completion_status = FCT_FAILURE; 1671 if (st.st_completion_status == FCT_SUCCESS) { 1672 qlt->qlt_state = FCT_STATE_ONLINING; 1673 qlt->qlt_state_not_acked = 1; 1674 st.st_completion_status = qlt_port_online(qlt); 1675 if (st.st_completion_status != STMF_SUCCESS) { 1676 qlt->qlt_state = FCT_STATE_OFFLINE; 1677 qlt->qlt_state_not_acked = 0; 1678 } else { 1679 qlt->qlt_state = FCT_STATE_ONLINE; 1680 } 
1681 } 1682 fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st); 1683 qlt->qlt_change_state_flags = 0; 1684 break; 1685 1686 case FCT_CMD_PORT_OFFLINE: 1687 if (qlt->qlt_state == FCT_STATE_OFFLINE) { 1688 st.st_completion_status = STMF_ALREADY; 1689 } else if (qlt->qlt_state != FCT_STATE_ONLINE) { 1690 st.st_completion_status = FCT_FAILURE; 1691 } 1692 if (st.st_completion_status == FCT_SUCCESS) { 1693 qlt->qlt_state = FCT_STATE_OFFLINING; 1694 qlt->qlt_state_not_acked = 1; 1695 1696 if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) { 1697 (void) qlt_firmware_dump(port, ssci); 1698 } 1699 qlt->qlt_change_state_flags = ssci->st_rflags; 1700 st.st_completion_status = qlt_port_offline(qlt); 1701 if (st.st_completion_status != STMF_SUCCESS) { 1702 qlt->qlt_state = FCT_STATE_ONLINE; 1703 qlt->qlt_state_not_acked = 0; 1704 } else { 1705 qlt->qlt_state = FCT_STATE_OFFLINE; 1706 } 1707 } 1708 fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st); 1709 break; 1710 1711 case FCT_ACK_PORT_ONLINE_COMPLETE: 1712 qlt->qlt_state_not_acked = 0; 1713 break; 1714 1715 case FCT_ACK_PORT_OFFLINE_COMPLETE: 1716 qlt->qlt_state_not_acked = 0; 1717 if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) && 1718 (qlt->qlt_stay_offline == 0)) { 1719 if (fct_port_initialize(port, 1720 qlt->qlt_change_state_flags, 1721 "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE " 1722 "with RLFLAG_RESET") != FCT_SUCCESS) { 1723 cmn_err(CE_WARN, "qlt_ctl: " 1724 "fct_port_initialize failed, please use " 1725 "stmfstate to start the port-%s manualy", 1726 qlt->qlt_port_alias); 1727 } 1728 } 1729 break; 1730 } 1731 } 1732 1733 /* ARGSUSED */ 1734 static fct_status_t 1735 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx) 1736 { 1737 cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)"); 1738 return (FCT_FAILURE); 1739 } 1740 1741 /* 1742 * Return a pointer to n entries in the request queue. Assumes that 1743 * request queue lock is held. 
Does a very short busy wait if 1744 * less/zero entries are available. Retuns NULL if it still cannot 1745 * fullfill the request. 1746 * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK** 1747 */ 1748 caddr_t 1749 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n) 1750 { 1751 int try = 0; 1752 1753 while (qlt->req_available < n) { 1754 uint32_t val1, val2, val3; 1755 val1 = REG_RD32(qlt, REG_REQ_OUT_PTR); 1756 val2 = REG_RD32(qlt, REG_REQ_OUT_PTR); 1757 val3 = REG_RD32(qlt, REG_REQ_OUT_PTR); 1758 if ((val1 != val2) || (val2 != val3)) 1759 continue; 1760 1761 qlt->req_ndx_from_fw = val1; 1762 qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 - 1763 ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) & 1764 (REQUEST_QUEUE_ENTRIES - 1)); 1765 if (qlt->req_available < n) { 1766 if (try < 2) { 1767 drv_usecwait(100); 1768 try++; 1769 continue; 1770 } else { 1771 stmf_trace(qlt->qlt_port_alias, 1772 "Req Q is full"); 1773 return (NULL); 1774 } 1775 } 1776 break; 1777 } 1778 /* We dont change anything until the entries are sumitted */ 1779 return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]); 1780 } 1781 1782 /* 1783 * updates the req in ptr to fw. Assumes that req lock is held. 1784 */ 1785 void 1786 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n) 1787 { 1788 ASSERT(n >= 1); 1789 qlt->req_ndx_to_fw += n; 1790 qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1; 1791 qlt->req_available -= n; 1792 REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw); 1793 } 1794 1795 1796 /* 1797 * Return a pointer to n entries in the priority request queue. Assumes that 1798 * priority request queue lock is held. Does a very short busy wait if 1799 * less/zero entries are available. Retuns NULL if it still cannot 1800 * fullfill the request. 
1801 * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK** 1802 */ 1803 caddr_t 1804 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n) 1805 { 1806 int try = 0; 1807 uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 - 1808 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) & 1809 (PRIORITY_QUEUE_ENTRIES - 1)); 1810 1811 while (req_available < n) { 1812 uint32_t val1, val2, val3; 1813 val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR); 1814 val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR); 1815 val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR); 1816 if ((val1 != val2) || (val2 != val3)) 1817 continue; 1818 1819 qlt->preq_ndx_from_fw = val1; 1820 req_available = PRIORITY_QUEUE_ENTRIES - 1 - 1821 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) & 1822 (PRIORITY_QUEUE_ENTRIES - 1)); 1823 if (req_available < n) { 1824 if (try < 2) { 1825 drv_usecwait(100); 1826 try++; 1827 continue; 1828 } else { 1829 return (NULL); 1830 } 1831 } 1832 break; 1833 } 1834 /* We dont change anything until the entries are sumitted */ 1835 return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]); 1836 } 1837 1838 /* 1839 * updates the req in ptr to fw. Assumes that req lock is held. 1840 */ 1841 void 1842 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n) 1843 { 1844 ASSERT(n >= 1); 1845 qlt->preq_ndx_to_fw += n; 1846 qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1; 1847 REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw); 1848 } 1849 1850 /* 1851 * - Should not be called from Interrupt. 1852 * - A very hardware specific function. Does not touch driver state. 1853 * - Assumes that interrupts are disabled or not there. 1854 * - Expects that the caller makes sure that all activity has stopped 1855 * and its ok now to go ahead and reset the chip. Also the caller 1856 * takes care of post reset damage control. 1857 * - called by initialize adapter() and dump_fw(for reset only). 
/*
 * - Should not be called from Interrupt.
 * - A very hardware specific function. Does not touch driver state.
 * - Assumes that interrupts are disabled or not there.
 * - Expects that the caller makes sure that all activity has stopped
 *   and its ok now to go ahead and reset the chip. Also the caller
 *   takes care of post reset damage control.
 * - called by initialize adapter() and dump_fw(for reset only).
 * - During attach() nothing much is happening and during
 *   initialize_adapter() the function (caller) does all the housekeeping
 *   so that this function can execute in peace.
 * - Returns 0 on success.
 */
static fct_status_t
qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
{
	int cntr;
	uint32_t start_addr;
	fct_status_t ret;

	/* XXX: Switch off LEDs */

	/* Disable Interrupts */
	REG_WR32(qlt, REG_INTR_CTRL, 0);
	/* Read back (value discarded) — presumably a posted-write flush */
	(void) REG_RD32(qlt, REG_INTR_CTRL);
	/* Stop DMA */
	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);

	/* Wait for DMA to be stopped */
	cntr = 0;
	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
		delay(drv_usectohz(10000));	/* mostly 10ms is 1 tick */
		cntr++;
		/* 3 sec should be more than enough */
		if (cntr == 300)
			return (QLT_DMA_STUCK);
	}

	/* Reset the Chip */
	REG_WR32(qlt, REG_CTRL_STATUS,
	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);

	qlt->qlt_link_up = 0;

	drv_usecwait(100);

	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
	cntr = 0;
	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
		delay(drv_usectohz(10000));
		cntr++;
		/* 3 sec should be more than enough */
		if (cntr == 300)
			return (QLT_ROM_STUCK);
	}
	/* Disable Interrupts (Probably not needed) */
	REG_WR32(qlt, REG_INTR_CTRL, 0);
	if (reset_only)
		return (QLT_SUCCESS);

	/*
	 * Load the two segments.  A user-uploaded image (via
	 * QLT_IOCTL_UPLOAD_FW) takes precedence over the built-in
	 * 2500/2400 images.
	 */
	if (qlt->fw_code01 != NULL) {
		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
		    qlt->fw_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
			    qlt->fw_length02, qlt->fw_addr02);
		}
		start_addr = qlt->fw_addr01;
	} else if (qlt->qlt_25xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
		    fw2500_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2500_code02,
			    fw2500_length02, fw2500_addr02);
		}
		start_addr = fw2500_addr01;
	} else {
		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
		    fw2400_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2400_code02,
			    fw2400_length02, fw2400_addr02);
		}
		start_addr = fw2400_addr01;
	}
	if (ret != QLT_SUCCESS)
		return (ret);

	/* Verify Checksum (mailbox cmd 7) */
	REG_WR16(qlt, REG_MBOX(0), 7);
	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
	REG_WR16(qlt, REG_MBOX(2), start_addr & 0xffff);
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	if (ret != QLT_SUCCESS)
		return (ret);

	/* Execute firmware (mailbox cmd 2) */
	REG_WR16(qlt, REG_MBOX(0), 2);
	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
	REG_WR16(qlt, REG_MBOX(2), start_addr & 0xffff);
	REG_WR16(qlt, REG_MBOX(3), 0);
	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	if (ret != QLT_SUCCESS)
		return (ret);

	/* Get revisions (About Firmware, mailbox cmd 8) */
	REG_WR16(qlt, REG_MBOX(0), 8);
	ret = qlt_raw_mailbox_command(qlt);
	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	if (ret != QLT_SUCCESS)
		return (ret);

	return (QLT_SUCCESS);
}
1977 */ 1978 static fct_status_t 1979 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr, 1980 uint32_t word_count, uint32_t risc_addr) 1981 { 1982 uint32_t words_sent = 0; 1983 uint32_t words_being_sent; 1984 uint32_t *cur_host_addr; 1985 uint32_t cur_risc_addr; 1986 uint64_t da; 1987 fct_status_t ret; 1988 1989 while (words_sent < word_count) { 1990 cur_host_addr = &(host_addr[words_sent]); 1991 cur_risc_addr = risc_addr + (words_sent << 2); 1992 words_being_sent = min(word_count - words_sent, 1993 TOTAL_DMA_MEM_SIZE >> 2); 1994 ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr, 1995 (uint32_t *)qlt->queue_mem_ptr, words_being_sent, 1996 DDI_DEV_AUTOINCR); 1997 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0, 1998 words_being_sent << 2, DDI_DMA_SYNC_FORDEV); 1999 da = qlt->queue_mem_cookie.dmac_laddress; 2000 REG_WR16(qlt, REG_MBOX(0), 0x0B); 2001 REG_WR16(qlt, REG_MBOX(1), risc_addr & 0xffff); 2002 REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff)); 2003 REG_WR16(qlt, REG_MBOX(3), da & 0xffff); 2004 da >>= 16; 2005 REG_WR16(qlt, REG_MBOX(2), da & 0xffff); 2006 da >>= 16; 2007 REG_WR16(qlt, REG_MBOX(7), da & 0xffff); 2008 da >>= 16; 2009 REG_WR16(qlt, REG_MBOX(6), da & 0xffff); 2010 REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff); 2011 REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff); 2012 ret = qlt_raw_mailbox_command(qlt); 2013 REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR); 2014 if (ret != QLT_SUCCESS) 2015 return (ret); 2016 words_sent += words_being_sent; 2017 } 2018 return (QLT_SUCCESS); 2019 } 2020 2021 /* 2022 * Not used during normal operation. Only during driver init. 2023 * Assumes that interrupts are disabled and mailboxes are loaded. 2024 * Just triggers the mailbox command an waits for the completion. 2025 * Also expects that There is nothing else going on and we will only 2026 * get back a mailbox completion from firmware. 
2027 * ---DOES NOT CLEAR INTERRUPT--- 2028 * Used only from the code path originating from 2029 * qlt_reset_chip_and_download_fw() 2030 */ 2031 static fct_status_t 2032 qlt_raw_mailbox_command(qlt_state_t *qlt) 2033 { 2034 int cntr = 0; 2035 uint32_t status; 2036 2037 REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR); 2038 while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) { 2039 cntr++; 2040 if (cntr == 100) 2041 return (QLT_MAILBOX_STUCK); 2042 delay(drv_usectohz(10000)); 2043 } 2044 status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff); 2045 if ((status == 1) || (status == 2) || 2046 (status == 0x10) || (status == 0x11)) { 2047 uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0)); 2048 if (mbox0 == 0x4000) 2049 return (QLT_SUCCESS); 2050 else 2051 return (QLT_MBOX_FAILED | mbox0); 2052 } 2053 /* This is unexpected, dump a message */ 2054 cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx", 2055 ddi_get_instance(qlt->dip), (unsigned long long)status); 2056 return (QLT_UNEXPECTED_RESPONSE); 2057 } 2058 2059 static mbox_cmd_t * 2060 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size) 2061 { 2062 mbox_cmd_t *mcp; 2063 2064 mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP); 2065 if (dma_size) { 2066 qlt_dmem_bctl_t *bctl; 2067 uint64_t da; 2068 2069 mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0); 2070 if (mcp->dbuf == NULL) { 2071 kmem_free(mcp, sizeof (*mcp)); 2072 return (NULL); 2073 } 2074 mcp->dbuf->db_data_size = dma_size; 2075 ASSERT(mcp->dbuf->db_sglist_length == 1); 2076 2077 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private; 2078 da = bctl->bctl_dev_addr; 2079 /* This is the most common initialization of dma ptrs */ 2080 mcp->to_fw[3] = da & 0xffff; 2081 da >>= 16; 2082 mcp->to_fw[2] = da & 0xffff; 2083 da >>= 16; 2084 mcp->to_fw[7] = da & 0xffff; 2085 da >>= 16; 2086 mcp->to_fw[6] = da & 0xffff; 2087 mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6; 2088 } 2089 mcp->to_fw_mask |= BIT_0; 2090 mcp->from_fw_mask 
 |= BIT_0;
	return (mcp);
}

/* Release a mailbox command and its optional DMA buffer. */
void
qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
{
	if (mcp->dbuf)
		qlt_i_dmem_free(qlt, mcp->dbuf);
	kmem_free(mcp, sizeof (*mcp));
}

/*
 * This can sleep. Should never be called from interrupt context.
 *
 * Serializes mailbox usage through mbox_lock/mbox_cv, loads the request
 * mailboxes selected by to_fw_mask, interrupts the RISC, then waits up
 * to MBOX_TIMEOUT for qlt_isr() to harvest the reply mailboxes into
 * from_fw[].  A timeout marks the mailbox state UNKNOWN and shuts the
 * port down.  Returns QLT_SUCCESS when firmware status (mbox 0) is
 * 0x4000.
 */
static fct_status_t
qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
{
	int retries;
	int i;
	char info[80];

	/* Sleeping below; refuse to run on an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD) {
		ASSERT(0);
		return (QLT_MBOX_FAILED);
	}

	mutex_enter(&qlt->mbox_lock);
	/* See if mailboxes are still uninitialized */
	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		mutex_exit(&qlt->mbox_lock);
		return (QLT_MBOX_NOT_INITIALIZED);
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			mutex_exit(&qlt->mbox_lock);
			return (QLT_MBOX_BUSY);
		}
	}
	/* Make sure we always ask for mailbox 0 */
	mcp->from_fw_mask |= BIT_0;

	/* Load mailboxes, set state and generate RISC interrupt */
	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
	qlt->mcp = mcp;
	for (i = 0; i < MAX_MBOXES; i++) {
		if (mcp->to_fw_mask & ((uint32_t)1 << i))
			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
	}
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);

qlt_mbox_wait_loop:;
	/* Wait for mailbox command completion */
	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
		/* Timed out: poison the mailbox state and shoot the port */
		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
		info[79] = 0;
		qlt->mcp = NULL;
		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
		mutex_exit(&qlt->mbox_lock);

		/*
		 * XXX
Throw HBA fatal error event
		 */
		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return (QLT_MBOX_TIMEOUT);
	}
	/* Woken without a state change: keep waiting (same deadline calc) */
	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
		goto qlt_mbox_wait_loop;

	qlt->mcp = NULL;

	/* Make sure its a completion */
	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
		mutex_exit(&qlt->mbox_lock);
		return (QLT_MBOX_ABORTED);
	}

	/* MBox command completed. Clear state, return based on mbox 0 */
	/* Mailboxes are already loaded by interrupt routine */
	qlt->mbox_io_state = MBOX_STATE_READY;
	mutex_exit(&qlt->mbox_lock);
	if (mcp->from_fw[0] != 0x4000)
		return (QLT_MBOX_FAILED | mcp->from_fw[0]);

	return (QLT_SUCCESS);
}

/*
 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
 *
 * Main interrupt service routine: decodes the RISC status register and
 * dispatches fast-path ATIO/response-queue updates, async events and
 * mailbox completions.  Re-runs (bounded by QLT_MAX_ITERATIONS_PER_INTR)
 * while the interrupt bit remains set.
 */
/* ARGSUSED */
static uint_t
qlt_isr(caddr_t arg, caddr_t arg2)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;
	int instance;
	uint32_t risc_status, intr_type;
	int i;
	int intr_loop_count;
	char info[80];

	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
	if (!mutex_tryenter(&qlt->intr_lock)) {
		/*
		 * Normally we will always get this lock. If tryenter is
		 * failing then it means that driver is trying to do
		 * some cleanup and is masking the intr but some intr
		 * has sneaked in between. See if our device has generated
		 * this intr. If so then wait a bit and return claimed.
		 * If not then return claimed if this is the 1st instance
		 * of a interrupt after driver has grabbed the lock.
 */
		if (risc_status & BIT_15) {
			drv_usecwait(10);
			return (DDI_INTR_CLAIMED);
		} else if (qlt->intr_sneak_counter) {
			qlt->intr_sneak_counter--;
			return (DDI_INTR_CLAIMED);
		} else {
			return (DDI_INTR_UNCLAIMED);
		}
	}
	if (((risc_status & BIT_15) == 0) ||
	    (qlt->qlt_intr_enabled == 0)) {
		/*
		 * This might be a pure coincidence that we are operating
		 * in an interrupt disabled mode and another device
		 * sharing the interrupt line has generated an interrupt
		 * while an interrupt from our device might be pending. Just
		 * ignore it and let the code handling the interrupt
		 * disabled mode handle it.
		 */
		mutex_exit(&qlt->intr_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * XXX take care for MSI case. disable intrs
	 * Its gonna be complicated because of the max iterations.
	 * as hba will have posted the intr which did not go on PCI
	 * but we did not service it either because of max iterations.
	 * Maybe offload the intr on a different thread.
	 */
	instance = ddi_get_instance(qlt->dip);
	intr_loop_count = 0;

	/* Mask chip interrupts while we service this one */
	REG_WR32(qlt, REG_INTR_CTRL, 0);

intr_again:;
	/* First check for high performance path */
	intr_type = risc_status & 0xff;
	if (intr_type == 0x1C) {
		/* ATIO queue update; new in-pointer rides in status[31:16] */
		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
		qlt->atio_ndx_from_fw = risc_status >> 16;
		qlt_handle_atio_queue_update(qlt);
	} else if (intr_type == 0x13) {
		/* Response queue update */
		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
		qlt->resp_ndx_from_fw = risc_status >> 16;
		qlt_handle_resp_queue_update(qlt);
		/* XXX what about priority queue */
	} else if (intr_type == 0x1D) {
		/* Both ATIO and response queues have new entries */
		qlt->atio_ndx_from_fw = REG_RD32(qlt, REG_ATIO_IN_PTR);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
		qlt->resp_ndx_from_fw = risc_status >> 16;
		qlt_handle_atio_queue_update(qlt);
		qlt_handle_resp_queue_update(qlt);
	} else if (intr_type == 0x12) {
		/* Async event; event code in status[31:16], args in mboxes */
		uint16_t code = risc_status >> 16;
		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));

		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
		    " mb5=%x, mb6=%x", code, mbox1, mbox2, mbox5, mbox6);
		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
		    " mb5=%x, mb6=%x", instance, code, mbox1, mbox2, mbox5,
		    mbox6);

		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
			/* Treated as a link reset, but only if link was up */
			if (qlt->qlt_link_up) {
				fct_handle_event(qlt->qlt_port,
				    FCT_EVENT_LINK_RESET, 0, 0);
			}
		} else if (code == 0x8012) {
			/* Link down */
			qlt->qlt_link_up = 0;
			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
			    0, 0);
		} else if (code == 0x8011) {
			/* Link up; mbox1 carries the negotiated speed */
			switch (mbox1) {
			case 0: qlt->link_speed = PORT_SPEED_1G;
				break;
			case 1: qlt->link_speed = PORT_SPEED_2G;
				break;
			case 3: qlt->link_speed = PORT_SPEED_4G;
				break;
			case 4: qlt->link_speed = PORT_SPEED_8G;
				break;
			default:
				qlt->link_speed = PORT_SPEED_UNKNOWN;
			}
			qlt->qlt_link_up = 1;
			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
			    0, 0);
		} else if (code == 0x8002) {
			/* Fatal firmware error: force a port shutdown/dump */
			(void) snprintf(info, 80,
			    "Got 8002, mb1=%x mb2=%x mb5=%x mb6=%x",
			    mbox1, mbox2, mbox5, mbox6);
			info[79] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		}
	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
		/* Handle mailbox completion */
		mutex_enter(&qlt->mbox_lock);
		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
			    " when driver wasn't waiting for it %d",
			    instance, qlt->mbox_io_state);
		} else {
			/* Harvest the reply mailboxes the caller asked for */
			for (i = 0; i < MAX_MBOXES; i++) {
				if (qlt->mcp->from_fw_mask &
				    (((uint32_t)1) << i)) {
					qlt->mcp->from_fw[i] =
					    REG_RD16(qlt, REG_MBOX(i));
				}
			}
			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
		}
		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
		cv_broadcast(&qlt->mbox_cv);
		mutex_exit(&qlt->mbox_lock);
	} else {
		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
		    instance, intr_type);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
	}

	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
	/* Another intr pending already?  Loop, but only a bounded number */
	if ((risc_status & BIT_15) &&
	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
		goto intr_again;
	}

	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);

	mutex_exit(&qlt->intr_lock);
	return (DDI_INTR_CLAIMED);
}

/* **************** NVRAM Functions ********************** */

/*
 * Read one 32-bit word from the serial flash.  faddr is the flash
 * address (BIT_31 is the controller's read-done handshake bit, so it is
 * masked off on the way in and polled on the way out).
 */
fct_status_t
qlt_read_flash_word(qlt_state_t *qlt,
uint32_t faddr, uint32_t *bp)
{
	uint32_t timer;

	/* Clear access error flag */
	REG_WR32(qlt, REG_CTRL_STATUS,
	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);

	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);

	/* Wait for READ cycle to complete. */
	for (timer = 3000; timer; timer--) {
		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
			break;
		}
		drv_usecwait(10);	/* up to 3000 * 10us = 30ms */
	}
	if (timer == 0) {
		return (QLT_FLASH_TIMEOUT);
	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
		return (QLT_FLASH_ACCESS_ERROR);
	}

	*bp = REG_RD32(qlt, REG_FLASH_DATA);

	return (QLT_SUCCESS);
}

/*
 * Read this function's NVRAM image out of flash into qlt->nvram,
 * verifying the additive checksum and the "ISP " signature.  The RISC
 * is paused around the flash access (qlt_global_lock serializes that
 * across instances).  If the node name is all zeroes it is derived from
 * the port name.
 */
fct_status_t
qlt_read_nvram(qlt_state_t *qlt)
{
	uint32_t index, addr, chksum;
	uint32_t val, *ptr;
	fct_status_t ret;
	qlt_nvram_t *nv;
	uint64_t empty_node_name = 0;

	/* NVRAM location depends on chip family and PCI function number */
	if (qlt->qlt_25xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
	} else {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
	}
	mutex_enter(&qlt_global_lock);

	/* Pause RISC. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */

	/* Get NVRAM data and calculate checksum. */
	ptr = (uint32_t *)qlt->nvram;
	chksum = 0;
	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
		ret = qlt_read_flash_word(qlt, addr++, &val);
		if (ret != QLT_SUCCESS) {
			mutex_exit(&qlt_global_lock);
			return (ret);
		}
		chksum += val;	/* whole image must sum to zero */
		*ptr = LE_32(val);
		ptr++;
	}

	/* Release RISC Pause */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting.
 */

	mutex_exit(&qlt_global_lock);

	/* Sanity check NVRAM Data */
	nv = qlt->nvram;
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
		return (QLT_BAD_NVRAM_DATA);
	}

	/* If node name is zero, hand craft it from port name */
	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
		bcopy(nv->port_name, nv->node_name, 8);
		/* Differ only in BIT_0 of the first byte */
		nv->node_name[0] = nv->node_name[0] & ~BIT_0;
		nv->port_name[0] = nv->node_name[0] | BIT_0;
	}

	return (QLT_SUCCESS);
}

/*
 * DMA-sync (for CPU) the ATIO queue entries between atio_ndx_to_fw and
 * atio_ndx_from_fw, handling ring wrap-around with two syncs.  Entries
 * are 64 bytes (hence the << 6).  Returns the number of new entries.
 */
uint32_t
qlt_sync_atio_queue(qlt_state_t *qlt)
{
	uint32_t total_ent;

	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
		/* Contiguous region, single sync */
		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
		    DDI_DMA_SYNC_FORCPU);
	} else {
		/* Wrapped: sync tail of the ring, then the head */
		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
		    qlt->atio_ndx_from_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
		    + (qlt->atio_ndx_to_fw << 6), (ATIO_QUEUE_ENTRIES -
		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    ATIO_QUEUE_OFFSET,
		    qlt->atio_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
	}
	return (total_ent);
}

/*
 * Consume all new ATIO queue entries (firmware -> host), dispatching
 * INOTs and ATIOs, then publish our new out-pointer to the chip.
 */
void
qlt_handle_atio_queue_update(qlt_state_t *qlt)
{
	uint32_t total_ent;

	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
		return;

	total_ent = qlt_sync_atio_queue(qlt);

	do {
		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
		    qlt->atio_ndx_to_fw << 6];
		uint32_t ent_cnt;

		/* Byte 1 = entry count; stop if it exceeds what we synced */
		ent_cnt = (uint32_t)(atio[1]);
		if (ent_cnt > total_ent) {
			break;
		}
		switch ((uint8_t)(atio[0])) {
		case 0x0d:	/* INOT */
			qlt_handle_inot(qlt, atio);
			break;
		case 0x06:	/*
ATIO */
			qlt_handle_atio(qlt, atio);
			break;
		default:
			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
			break;
		}
		/* Advance, masking for ring wrap (power-of-2 queue size) */
		qlt->atio_ndx_to_fw = (qlt->atio_ndx_to_fw + ent_cnt) &
		    (ATIO_QUEUE_ENTRIES - 1);
		total_ent -= ent_cnt;
	} while (total_ent > 0);
	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
}

/*
 * DMA-sync (for CPU) the new response queue entries, handling ring
 * wrap-around with two syncs.  Entries are 64 bytes (<< 6).  Returns
 * the number of new entries.
 */
uint32_t
qlt_sync_resp_queue(qlt_state_t *qlt)
{
	uint32_t total_ent;

	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
		/* Contiguous region, single sync */
		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    RESPONSE_QUEUE_OFFSET
		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
		    DDI_DMA_SYNC_FORCPU);
	} else {
		/* Wrapped: sync tail of the ring, then the head */
		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
		    qlt->resp_ndx_from_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    RESPONSE_QUEUE_OFFSET
		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    RESPONSE_QUEUE_OFFSET,
		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
	}
	return (total_ent);
}

/*
 * Consume all new response queue entries, dispatching each IOCB by its
 * type byte, then publish our new out-pointer to the chip.  Unknown
 * entry types are silently skipped (the entry count still advances).
 */
void
qlt_handle_resp_queue_update(qlt_state_t *qlt)
{
	uint32_t total_ent;
	uint8_t c;

	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
		return;

	total_ent = qlt_sync_resp_queue(qlt);

	do {
		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
		uint32_t ent_cnt;

		/* Byte 1 = entry count; stop if it exceeds what we synced */
		ent_cnt = (uint32_t)(resp[1]);
		if (ent_cnt > total_ent) {
			break;
		}
		switch ((uint8_t)(resp[0])) {
		case 0x12:	/* CTIO completion */
			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
			break;
		case 0x0e:	/* NACK */
			/* Do Nothing */
			break;
		case 0x29:	/* CT PassThrough */
			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
			break;
		case
0x33:	/* Abort IO IOCB completion */
			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
			break;
		case 0x51:	/* PUREX */
			qlt_handle_purex(qlt, (uint8_t *)resp);
			break;
		case 0x52:
			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
			break;
		case 0x53:	/* ELS passthrough */
			/* Bits 7:5 of byte 0x1f discriminate the flavor */
			c = ((uint8_t)resp[0x1f]) >> 5;
			if (c == 0) {
				qlt_handle_sol_els_completion(qlt,
				    (uint8_t *)resp);
			} else if (c == 3) {
				qlt_handle_unsol_els_abort_completion(qlt,
				    (uint8_t *)resp);
			} else {
				qlt_handle_unsol_els_completion(qlt,
				    (uint8_t *)resp);
			}
			break;
		case 0x54:	/* ABTS received */
			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
			break;
		case 0x55:	/* ABTS completion */
			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
			break;
		}
		/* Advance, masking for ring wrap (power-of-2 queue size) */
		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
		    (RESPONSE_QUEUE_ENTRIES - 1);
		total_ent -= ent_cnt;
	} while (total_ent > 0);
	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
}

/*
 * Look up the firmware login handle for a given 24-bit port id by
 * issuing GET ID LIST (mailbox cmd 0x7C) and scanning the returned
 * 8-byte entries (3-byte port id + handle in bytes 4-5).  Also cross
 * checks cmd_handle (the handle the incoming login arrived with, or
 * FCT_HANDLE_NONE) for conflicts: the same port id using a different
 * handle, or the same handle already bound to another port id, both
 * fail with QLT_FAILURE.  If the port id is not in the list,
 * *ret_handle is simply set to cmd_handle.
 */
fct_status_t
qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
    uint16_t *ret_handle)
{
	fct_status_t ret;
	mbox_cmd_t *mcp;
	uint16_t n;
	uint16_t h;
	uint32_t ent_id;
	uint8_t *p;
	int found = 0;

	/* Buffer for up to 2048 eight-byte ID list entries */
	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
	if (mcp == NULL) {
		return (STMF_ALLOC_FAILURE);
	}
	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
	mcp->to_fw[8] = 2048 * 8;
	mcp->to_fw_mask |= BIT_8;
	mcp->from_fw_mask |= BIT_1 | BIT_2;	/* entry count comes back */

	ret = qlt_mailbox_command(qlt, mcp);
	if (ret != QLT_SUCCESS) {
		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
		    mcp->from_fw[1], mcp->from_fw[2]);
		qlt_free_mailbox_command(qlt, mcp);
		return (ret);
	}
	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
	p =
mcp->dbuf->db_sglist[0].seg_addr;
	/* from_fw[1] is the number of entries the firmware returned */
	for (n = 0; n < mcp->from_fw[1]; n++) {
		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
		h = (uint16_t)p[4] | (((uint16_t)p[5]) << 8);
		if (ent_id == id) {
			found = 1;
			*ret_handle = h;
			if ((cmd_handle != FCT_HANDLE_NONE) &&
			    (cmd_handle != h)) {
				cmn_err(CE_WARN, "login for portid %x came in "
				    "with handle %x, while the portid was "
				    "already using a different handle %x",
				    id, cmd_handle, h);
				qlt_free_mailbox_command(qlt, mcp);
				return (QLT_FAILURE);
			}
			break;
		}
		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
			cmn_err(CE_WARN, "login for portid %x came in with "
			    "handle %x, while the handle was already in use "
			    "for portid %x", id, cmd_handle, ent_id);
			qlt_free_mailbox_command(qlt, mcp);
			return (QLT_FAILURE);
		}
		p += 8;
	}
	if (!found) {
		*ret_handle = cmd_handle;
	}
	qlt_free_mailbox_command(qlt, mcp);
	return (FCT_SUCCESS);
}

/*
 * Build the PLOGI request payload (ELS common service parameters,
 * port/node WWNs, class-3 service parameters) in the solicited ELS
 * command's request buffer.
 */
/* ARGSUSED */
fct_status_t
qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
    fct_cmd_t *login)
{
	uint8_t *p;

	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
	p[0] = ELS_OP_PLOGI;
	/*
	 * NOTE(review): 16-bit store through a cast -- result depends on
	 * host endianness and assumes p+4 is 2-byte aligned; byte-wise
	 * stores would be safer.  Verify intended wire value before
	 * changing.
	 */
	*((uint16_t *)(&p[4])) = 0x2020;
	p[7] = 3;
	p[8] = 0x88;
	p[10] = 8;
	p[13] = 0xff; p[15] = 0x1f;
	p[18] = 7; p[19] = 0xd0;

	bcopy(port->port_pwwn, p + 20, 8);
	bcopy(port->port_nwwn, p + 28, 8);

	p[68] = 0x80;
	p[74] = 8;
	p[77] = 0xff;
	p[81] = 1;

	return (FCT_SUCCESS);
}

/* PLOGI ACC payload needs no FCA-specific fixup; nothing to do. */
/* ARGSUSED */
fct_status_t
qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
    fct_cmd_t *login)
{
	return (FCT_SUCCESS);
}

/*
 * Resolve/validate the firmware handle for a remote port during login.
 * Well-known addresses (0xFFFFFC-0xFFFFFF) map to fixed handles
 * 0x7FC-0x7FF; everything else goes through the GET ID LIST lookup.
 * Also fills the PLOGI request (solicited) or response payload.
 */
fct_status_t
qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
    fct_cmd_t *login)
{
	uint16_t h;
	fct_status_t ret;

	switch
 (rp->rp_id) {
	case 0xFFFFFC: h = 0x7FC; break;
	case 0xFFFFFD: h = 0x7FD; break;
	case 0xFFFFFE: h = 0x7FE; break;
	case 0xFFFFFF: h = 0x7FF; break;
	default:
		ret = qlt_portid_to_handle(
		    (qlt_state_t *)port->port_fca_private, rp->rp_id,
		    login->cmd_rp_handle, &h);
		if (ret != FCT_SUCCESS)
			return (ret);
	}

	if (login->cmd_type == FCT_CMD_SOL_ELS) {
		ret = qlt_fill_plogi_req(port, rp, login);
	} else {
		ret = qlt_fill_plogi_resp(port, rp, login);
	}

	if (ret != FCT_SUCCESS)
		return (ret);

	/* No handle known anywhere yet -- framework will assign later */
	if (h == FCT_HANDLE_NONE)
		return (FCT_SUCCESS);

	if (rp->rp_handle == FCT_HANDLE_NONE) {
		rp->rp_handle = h;
		return (FCT_SUCCESS);
	}

	/* Existing handle must agree with the resolved one */
	if (rp->rp_handle == h)
		return (FCT_SUCCESS);

	return (FCT_FAILURE);
}
/*
 * invoked in single thread
 *
 * Implicit-LOGO the remote port via a 0x52 IOCB on the priority request
 * queue, then wait (up to DEREG_RP_TIMEOUT) for the completion that
 * qlt_handle_dereg_completion() signals through rp_dereg_cv.
 */
fct_status_t
qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
{
	uint8_t *req;
	qlt_state_t *qlt;
	clock_t dereg_req_timer;
	fct_status_t ret;

	qlt = (qlt_state_t *)port->port_fca_private;

	/* Nothing to tell the chip if the port is (going) offline */
	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
	    (qlt->qlt_state == FCT_STATE_OFFLINING))
		return (FCT_SUCCESS);
	ASSERT(qlt->rp_id_in_dereg == 0);

	mutex_enter(&qlt->preq_lock);
	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->preq_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x52; req[1] = 1;
	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff); */
	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 0x98);	/* implicit logo */
	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
	qlt->rp_id_in_dereg = rp->rp_id;
	qlt_submit_preq_entries(qlt, 1);

	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
	if (cv_timedwait(&qlt->rp_dereg_cv,
	    &qlt->preq_lock, dereg_req_timer) > 0) {
		/* Completion arrived; status was filled in by the ISR path */
		ret = qlt->rp_dereg_status;
	} else {
		ret = FCT_BUSY;
	}
	qlt->rp_dereg_status = 0;
	qlt->rp_id_in_dereg = 0;
	mutex_exit(&qlt->preq_lock);
	return (ret);
}

/*
 * Pass received ELS up to framework.
 *
 * Parses a PUREX (0x51) IOCB from the response queue: extracts the
 * remote/local port ids, exchange address, OX_ID/RX_ID and the ELS
 * payload (which the firmware delivers word-swapped and possibly
 * spread over continuation entries that wrap around the ring).
 * On any allocation failure the port is shut down.
 */
static void
qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
{
	fct_cmd_t *cmd;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	uint32_t payload_size;
	uint32_t remote_portid;
	uint8_t *pldptr, *bndrptr;
	int i, off;
	uint16_t iocb_flags;
	char info[160];

	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
	/* BIT_15: payload length lives in a different field */
	if (iocb_flags & BIT_15) {
		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
	} else {
		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
	}

	/* resp[1] = total IOCB entries; payload must fit inside them */
	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
		cmn_err(CE_WARN, "handle_purex: payload is too large");
		goto cmd_null;
	}

	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS, payload_size +
	    GET_STRUCT_SIZE(qlt_cmd_t), 0);
	if (cmd == NULL) {
cmd_null:;
		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
	if (cmd->cmd_rp_handle == 0xFFFF) {
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	}

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	els->els_req_size = payload_size;
	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
	    GET_STRUCT_SIZE(qlt_cmd_t));
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt,
 (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	pldptr = &resp[0x2C];
	/* bndrptr = one past the end of the response ring, for wrap */
	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
		/* Take care of fw's swapping of payload */
		els->els_req_payload[i] = pldptr[3];
		els->els_req_payload[i+1] = pldptr[2];
		els->els_req_payload[i+2] = pldptr[1];
		els->els_req_payload[i+3] = pldptr[0];
		pldptr += 4;
		if (pldptr == bndrptr)
			pldptr = (uint8_t *)qlt->resp_ptr;
		off += 4;
		/* Crossing into a continuation IOCB: skip its 4-byte header */
		if (off >= IOCB_SIZE) {
			off = 4;
			pldptr += 4;
		}
	}
	fct_post_rcvd_cmd(cmd, 0);
}

/*
 * FCT "send response" entry point: dispatch on command type to the
 * FCP-status, ELS-response or ABTS-response path.  FORCE_FCA_DONE is
 * only honored for ABTS (handle zeroed); for the other types it is a
 * fatal, port-killing condition.
 */
fct_status_t
qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
{
	qlt_state_t *qlt;
	char info[160];

	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;

	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
			goto fatal_panic;
		} else {
			return (qlt_send_status(qlt, cmd));
		}
	}

	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
			goto fatal_panic;
		} else {
			return (qlt_send_els_response(qlt, cmd));
		}
	}

	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
		cmd->cmd_handle = 0;
	}

	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
		return (qlt_send_abts_response(qlt, cmd, 0));
	} else {
		ASSERT(0);
		return (FCT_FAILURE);
	}

fatal_panic:;
	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
	    ioflags);
	info[159] = 0;
	(void) fct_port_shutdown(qlt->qlt_port,
	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
	return (FCT_FAILURE);
}

/*
 * Queue a CTIO type 7 (0x12) IOCB to move SCSI data for cmd using the
 * DMA buffer dbuf; direction comes from dbuf->db_flags.  May piggyback
 * good status (BIT_15) when DB_SEND_STATUS_GOOD is set.
 */
/* ARGSUSED */
fct_status_t
qlt_xfer_scsi_data(fct_cmd_t
 *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req;
	uint16_t flags;

	if (dbuf->db_handle == 0)
		qcmd->dbuf = dbuf;
	/* Upper nibble of the ATIO's byte 3 is carried into CTIO flags */
	flags = ((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5;
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		flags |= 2;	/* data-in (to initiator) */
		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
	} else {
		flags |= 1;	/* data-out (from initiator) */
	}

	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
		flags |= BIT_15;

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x12; req[1] = 0x1;	/* CTIO type 7, one entry */
	req[2] = dbuf->db_handle;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
	req[12] = 1;	/* one data segment */
	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16(qlt, req+0x1A, flags);
	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (STMF_SUCCESS);
}

/*
 * We must construct proper FCP_RSP_IU now. Here we only focus on
 * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
 * we could have caught them before we enter here.
2957 */ 2958 fct_status_t 2959 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd) 2960 { 2961 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private; 2962 scsi_task_t *task = (scsi_task_t *)cmd->cmd_specific; 2963 qlt_dmem_bctl_t *bctl; 2964 uint32_t size; 2965 uint8_t *req, *fcp_rsp_iu; 2966 uint8_t *psd, sensbuf[24]; /* sense data */ 2967 uint16_t flags; 2968 uint16_t scsi_status; 2969 int use_mode2; 2970 int ndx; 2971 2972 /* 2973 * Enter fast channel for non check condition 2974 */ 2975 if (task->task_scsi_status != STATUS_CHECK) { 2976 /* 2977 * We will use mode1 2978 */ 2979 flags = BIT_6 | BIT_15 | 2980 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5); 2981 scsi_status = (uint16_t)task->task_scsi_status; 2982 if (task->task_status_ctrl == TASK_SCTRL_OVER) { 2983 scsi_status |= BIT_10; 2984 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) { 2985 scsi_status |= BIT_11; 2986 } 2987 qcmd->dbuf_rsp_iu = NULL; 2988 2989 /* 2990 * Fillout CTIO type 7 IOCB 2991 */ 2992 mutex_enter(&qlt->req_lock); 2993 req = (uint8_t *)qlt_get_req_entries(qlt, 1); 2994 if (req == NULL) { 2995 mutex_exit(&qlt->req_lock); 2996 return (FCT_BUSY); 2997 } 2998 2999 /* 3000 * Common fields 3001 */ 3002 bzero(req, IOCB_SIZE); 3003 req[0x00] = 0x12; 3004 req[0x01] = 0x1; 3005 req[0x02] = BIT_7; /* indicate if it's a pure status req */ 3006 QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle); 3007 QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle); 3008 QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid); 3009 QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr); 3010 3011 /* 3012 * Mode-specific fields 3013 */ 3014 QMEM_WR16(qlt, req + 0x1A, flags); 3015 QMEM_WR32(qlt, req + 0x1C, task->task_resid); 3016 QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid); 3017 QMEM_WR16(qlt, req + 0x22, scsi_status); 3018 3019 /* 3020 * Trigger FW to send SCSI status out 3021 */ 3022 qlt_submit_req_entries(qlt, 1); 3023 mutex_exit(&qlt->req_lock); 3024 return (STMF_SUCCESS); 3025 } 3026 3027 ASSERT(task->task_scsi_status == 
STATUS_CHECK); 3028 /* 3029 * Decide the SCSI status mode, that should be used 3030 */ 3031 use_mode2 = (task->task_sense_length > 24); 3032 3033 /* 3034 * Prepare required information per the SCSI status mode 3035 */ 3036 flags = BIT_15 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5); 3037 if (use_mode2) { 3038 flags |= BIT_7; 3039 3040 size = task->task_sense_length; 3041 qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt, 3042 task->task_sense_length, &size, 0); 3043 if (!qcmd->dbuf_rsp_iu) { 3044 return (FCT_ALLOC_FAILURE); 3045 } 3046 3047 /* 3048 * Start to construct FCP_RSP IU 3049 */ 3050 fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr; 3051 bzero(fcp_rsp_iu, 24); 3052 3053 /* 3054 * FCP_RSP IU flags, byte10 3055 */ 3056 fcp_rsp_iu[10] |= BIT_1; 3057 if (task->task_status_ctrl == TASK_SCTRL_OVER) { 3058 fcp_rsp_iu[10] |= BIT_2; 3059 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) { 3060 fcp_rsp_iu[10] |= BIT_3; 3061 } 3062 3063 /* 3064 * SCSI status code, byte11 3065 */ 3066 fcp_rsp_iu[11] = task->task_scsi_status; 3067 3068 /* 3069 * FCP_RESID (Overrun or underrun) 3070 */ 3071 fcp_rsp_iu[12] = (task->task_resid >> 24) & 0xFF; 3072 fcp_rsp_iu[13] = (task->task_resid >> 16) & 0xFF; 3073 fcp_rsp_iu[14] = (task->task_resid >> 8) & 0xFF; 3074 fcp_rsp_iu[15] = (task->task_resid >> 0) & 0xFF; 3075 3076 /* 3077 * FCP_SNS_LEN 3078 */ 3079 fcp_rsp_iu[18] = (task->task_sense_length >> 8) & 0xFF; 3080 fcp_rsp_iu[19] = (task->task_sense_length >> 0) & 0xFF; 3081 3082 /* 3083 * FCP_RSP_LEN 3084 */ 3085 /* 3086 * no FCP_RSP_INFO 3087 */ 3088 /* 3089 * FCP_SNS_INFO 3090 */ 3091 bcopy(task->task_sense_data, fcp_rsp_iu + 24, 3092 task->task_sense_length); 3093 3094 /* 3095 * Ensure dma data consistency 3096 */ 3097 qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV); 3098 } else { 3099 flags |= BIT_6; 3100 3101 scsi_status = (uint16_t)task->task_scsi_status; 3102 if (task->task_status_ctrl == TASK_SCTRL_OVER) { 3103 scsi_status |= BIT_10; 3104 } else if 
(task->task_status_ctrl == TASK_SCTRL_UNDER) { 3105 scsi_status |= BIT_11; 3106 } 3107 if (task->task_sense_length) { 3108 scsi_status |= BIT_9; 3109 } 3110 bcopy(task->task_sense_data, sensbuf, task->task_sense_length); 3111 qcmd->dbuf_rsp_iu = NULL; 3112 } 3113 3114 /* 3115 * Fillout CTIO type 7 IOCB 3116 */ 3117 mutex_enter(&qlt->req_lock); 3118 req = (uint8_t *)qlt_get_req_entries(qlt, 1); 3119 if (req == NULL) { 3120 mutex_exit(&qlt->req_lock); 3121 if (use_mode2) { 3122 qlt_dmem_free(cmd->cmd_port->port_fds, 3123 qcmd->dbuf_rsp_iu); 3124 qcmd->dbuf_rsp_iu = NULL; 3125 } 3126 return (FCT_BUSY); 3127 } 3128 3129 /* 3130 * Common fields 3131 */ 3132 bzero(req, IOCB_SIZE); 3133 req[0x00] = 0x12; 3134 req[0x01] = 0x1; 3135 req[0x02] = BIT_7; /* to indicate if it's a pure status req */ 3136 QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle); 3137 QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle); 3138 QMEM_WR16(qlt, req + 0x0A, 0); /* not timed by FW */ 3139 if (use_mode2) { 3140 QMEM_WR16(qlt, req+0x0C, 1); /* FCP RSP IU data field */ 3141 } 3142 QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid); 3143 QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr); 3144 3145 /* 3146 * Mode-specific fields 3147 */ 3148 if (!use_mode2) { 3149 QMEM_WR16(qlt, req + 0x18, task->task_sense_length); 3150 } 3151 QMEM_WR16(qlt, req + 0x1A, flags); 3152 QMEM_WR32(qlt, req + 0x1C, task->task_resid); 3153 QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid); 3154 if (use_mode2) { 3155 bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private; 3156 QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length); 3157 QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr); 3158 QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length); 3159 } else { 3160 QMEM_WR16(qlt, req + 0x22, scsi_status); 3161 psd = req+0x28; 3162 3163 /* 3164 * Data in sense buf is always big-endian, data in IOCB 3165 * should always be little-endian, so we must do swapping. 
		 */
		size = ((task->task_sense_length + 3) & (~3));
		for (ndx = 0; ndx < size; ndx += 4) {
			psd[ndx + 0] = sensbuf[ndx + 3];
			psd[ndx + 1] = sensbuf[ndx + 2];
			psd[ndx + 2] = sensbuf[ndx + 1];
			psd[ndx + 3] = sensbuf[ndx + 0];
		}
	}

	/*
	 * Trigger FW to send SCSI status out
	 */
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	/*
	 * NOTE(review): the error paths above return FCT_* codes while this
	 * success path returns STMF_SUCCESS -- verify the two status domains
	 * are interchangeable here.
	 */
	return (STMF_SUCCESS);
}

/*
 * Send the response for an unsolicited ELS that FCT handed up.  The
 * response payload (els->els_resp_payload) is copied into a driver DMA
 * buffer and an ELS pass-through (type 0x53) IOCB is queued to the
 * firmware.
 *
 * Returns FCT_BUSY if the DMA buffer or a request queue entry cannot
 * be obtained, FCT_SUCCESS once the IOCB has been submitted.
 */
fct_status_t
qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd;
	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
	uint8_t *req, *addr;
	qlt_dmem_bctl_t *bctl;
	uint32_t minsize;
	uint8_t elsop, req1f;

	addr = els->els_resp_payload;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;

	minsize = els->els_resp_size;
	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
	if (qcmd->dbuf == NULL)
		return (FCT_BUSY);

	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;

	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);

	/* First payload byte 0x02 is an LS_ACC; anything else is a reject */
	if (addr[0] == 0x02) {	/* ACC */
		req1f = BIT_5;
	} else {
		req1f = BIT_6;
	}
	elsop = els->els_req_payload[0];
	/* Session-affecting ELS ops get an extra control flag */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f |= BIT_4;
	}

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	/* In point-to-point topology the local N_Port ID goes in the IOCB */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
		req[0x1c] = cmd->cmd_lportid & 0xff;
		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
	}
	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}

/*
 * Respond to (or terminate) a received ABTS.  The ABTS IOCB saved at
 * receive time (qcmd->buf) is copied back onto the request queue as a
 * type 0x55 entry with the port IDs swapped (the reply goes back to
 * the ABTS originator) and the F_CTL bits adjusted.
 *
 * Returns FCT_BUSY if no request queue entry is available, FCT_SUCCESS
 * once the IOCB has been submitted.
 */
fct_status_t
qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
{
	qlt_abts_cmd_t *qcmd;
	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
	uint8_t *req;
	uint32_t lportid;
	uint32_t fctl;
	int i;

	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	/* Start from the original ABTS IOCB saved when it was received */
	bcopy(qcmd->buf, req, IOCB_SIZE);
	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
	fctl = QMEM_RD32(qlt, req+0x1C);
	/*
	 * Adjust F_CTL: toggle BIT_23, clear BIT_22, set BIT_19|BIT_16
	 * (presumably exchange/sequence context and last/end-sequence bits
	 * -- confirm against the FC-FS F_CTL layout).
	 */
	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp)
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	else
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
	if (terminate) {
		QMEM_WR16(qlt, (&req[0xC]), 1);
	}
	/* Swap the port IDs: the response goes back to the originator */
	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
	req[0x17] = abts->abts_resp_rctl;
	QMEM_WR32(qlt, req+0x18, lportid);
	QMEM_WR32(qlt, req+0x1C, fctl);
	req[0x23]++;
	for (i = 0; i < 12; i += 4) {
		/* Take care of firmware's LE requirement */
		req[0x2C+i] = abts->abts_resp_payload[i+3];
		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
		req[0x2C+i+3] = abts->abts_resp_payload[i];
	}
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}

/*
 * Handle an immediate notify (INOT) IOCB from the firmware.  The
 * 64-byte entry is copied back onto the request queue unchanged except
 * for its entry type, which is set to 0x0e (presumably a notify
 * acknowledge -- confirm against the firmware interface spec).
 */
static void
qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
{
	int i;
	uint32_t d;
	caddr_t req;
	/* Just put it on the request queue */
	mutex_enter(&qlt->req_lock);
	req = qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		/* XXX handle this */
		return;
	}
	/* Copy all 16 words (64 bytes) through the queue-memory accessors */
	for (i = 0; i < 16; i++) {
		d = QMEM_RD32(qlt, inot);
		inot += 4;
		QMEM_WR32(qlt, req, d);
		req += 4;
	}
	req -= 64;
	req[0] = 0x0e;
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);
}

/* Maps the low 3 bits of the FCP task-attribute byte to task_flags */
uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };

/*
 * Handle an unsolicited SCSI command (ATIO).  Parses the FCP_CMND IU,
 * allocates a scsi_task through FCT and posts it.  If no task can be
 * allocated (or the firmware reported no exchange resource), the
 * exchange is terminated with a CTIO (type 0x12) carrying BIT_14.
 */
static void
qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
{
	fct_cmd_t *cmd;
	scsi_task_t *task;
	qlt_cmd_t *qcmd;
	uint32_t rportid, fw_xchg_addr;
	uint8_t *p, *q, *req, tm;
	uint16_t cdb_size, flags, oxid;
	char info[160];

	/*
	 * If either bidirection xfer is requested or there is extended
	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
	 */
	cdb_size = 16;
	if (atio[0x20 + 11] >= 3) {
		uint8_t b = atio[0x20 + 11];
		uint16_t b1;
		if ((b & 3) == 3) {
			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
			    "received, dropping the cmd as bidirectional "
			    " transfers are not yet supported", qlt->instance);
			/* XXX abort the I/O */
			return;
		}
		cdb_size += b & 0xfc;
		/*
		 * Verify that we have enough entries. Without additional CDB
		 * Everything will fit nicely within the same 64 bytes. So the
		 * additional cdb size is essentially the # of additional bytes
		 * we need.
		 */
		b1 = (uint16_t)b;
		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
			    " cdb (cdb size = %d bytes), however the firmware "
			    " did not DMAed the entire FCP_CMD IU, entry count "
			    " is %d while it should be %d", qlt->instance,
			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
			/* XXX abort the I/O */
			return;
		}
	}

	rportid = (((uint32_t)atio[8 + 5]) << 16) |
	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
	oxid = (((uint16_t)atio[8 + 16]) << 8) | atio[8+17];

	/* 0xFFFFFFFF means the firmware has no exchange for this command */
	if (fw_xchg_addr == 0xFFFFFFFF) {
		cmd = NULL;
	} else {
		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
	}
	if (cmd == NULL) {
		/* Abort this IO */
		flags = BIT_14 | ((atio[3] & 0xF0) << 5);

		mutex_enter(&qlt->req_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
		if (req == NULL) {
			mutex_exit(&qlt->req_lock);

			(void) snprintf(info, 160,
			    "qlt_handle_atio: qlt-%p, can't "
			    "allocate space for scsi_task", (void *)qlt);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
			return;
		}
		bzero(req, IOCB_SIZE);
		req[0] = 0x12; req[1] = 0x1;
		QMEM_WR32(qlt, req+4, 0);
		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
		    rportid));
		QMEM_WR16(qlt, req+10, 60);
		QMEM_WR32(qlt, req+0x10, rportid);
		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
		QMEM_WR16(qlt, req+0x1A, flags);
		QMEM_WR16(qlt, req+0x20, oxid);
		qlt_submit_req_entries(qlt, 1);
		mutex_exit(&qlt->req_lock);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->fw_xchg_addr = fw_xchg_addr;
	qcmd->param.atio_byte3 = atio[3];
	cmd->cmd_oxid = oxid;
	cmd->cmd_rxid = (((uint16_t)atio[8 + 18]) << 8) | atio[8+19];
	cmd->cmd_rportid = rportid;
	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	/* Dont do a 64 byte read as this is IOMMU */
	q = atio+0x28;
	/* XXX Handle fcp_cntl */
	task->task_cmd_seq_no = (uint32_t)(*q++);
	task->task_csn_size = 8;
	task->task_flags = qlt_task_flags[(*q++) & 7];
	tm = *q++;
	if (tm) {
		if (tm & BIT_1)
			task->task_mgmt_function = TM_ABORT_TASK_SET;
		else if (tm & BIT_2)
			task->task_mgmt_function = TM_CLEAR_TASK_SET;
		else if (tm & BIT_4)
			task->task_mgmt_function = TM_LUN_RESET;
		else if (tm & BIT_5)
			task->task_mgmt_function = TM_TARGET_COLD_RESET;
		else if (tm & BIT_6)
			task->task_mgmt_function = TM_CLEAR_ACA;
		else
			task->task_mgmt_function = TM_ABORT_TASK;
	}
	task->task_max_nbufs = STMF_BUFS_MAX;
	/* NOTE(review): task_csn_size is also set above; redundant */
	task->task_csn_size = 8;
	task->task_flags |= ((*q++) & 3) << 5;
	p = task->task_cdb;
	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
	if (cdb_size > 16) {
		uint16_t xtra = cdb_size - 16;
		uint16_t i;
		uint8_t cb[4];

		while (xtra) {
			*p++ = *q++;
			xtra--;
			/* Wrap around the circular ATIO queue if needed */
			if (q == ((uint8_t *)qlt->queue_mem_ptr +
			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
				q = (uint8_t *)qlt->queue_mem_ptr +
				    ATIO_QUEUE_OFFSET;
			}
		}
		for (i = 0; i < 4; i++) {
			cb[i] = *q++;
			/* Wrap around the circular ATIO queue if needed */
			if (q == ((uint8_t *)qlt->queue_mem_ptr +
			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
				q = (uint8_t *)qlt->queue_mem_ptr +
				    ATIO_QUEUE_OFFSET;
			}
		}
		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
		    (((uint32_t)cb[1]) << 16) |
		    (((uint32_t)cb[2]) << 8) | cb[3];
	} else {
		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
		    (((uint32_t)q[1]) << 16) |
		    (((uint32_t)q[2]) << 8) | q[3];
	}
	fct_post_rcvd_cmd(cmd, 0);
}

/*
 * Completion of an implicit logout (remote port deregistration).
 * Records the result in qlt->rp_dereg_status and wakes the thread
 * waiting on rp_dereg_cv.
 */
static void
qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	uint16_t status;
	uint32_t portid;
	uint32_t subcode1, subcode2;

	status = QMEM_RD16(qlt, rsp+8);
	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
	subcode1 = QMEM_RD32(qlt, rsp+0x14);
	subcode2 = QMEM_RD32(qlt, rsp+0x18);

	mutex_enter(&qlt->preq_lock);
	if (portid != qlt->rp_id_in_dereg) {
		int instance = ddi_get_instance(qlt->dip);
		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
		    " received when driver wasn't waiting for it",
		    instance, portid);
		mutex_exit(&qlt->preq_lock);
		return;
	}

	if (status != 0) {
		QLT_LOG(qlt->qlt_port_alias, "implicit logout completed "
		    "for 0x%x with status %x, subcode1 %x subcode2 %x",
		    portid, status, subcode1, subcode2);
		/* 0x31/0x0a (presumably "already logged out") is a success */
		if (status == 0x31 && subcode1 == 0x0a)
			qlt->rp_dereg_status = FCT_SUCCESS;
		else
			qlt->rp_dereg_status =
			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
	} else {
		qlt->rp_dereg_status = FCT_SUCCESS;
	}
	cv_signal(&qlt->rp_dereg_cv);
	mutex_exit(&qlt->preq_lock);
}

/*
 * Note that when an ELS is aborted, the regular or aborted completion
 * (if any) gets posted before the abort IOCB comes back on response queue.
 */
/*
 * Completion of an unsolicited ELS response IOCB.  Frees the response
 * DMA buffer and reports the outcome to FCT via
 * fct_send_response_done().
 */
static void
qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[160];
	fct_cmd_t *cmd;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint32_t subcode1, subcode2;
	uint16_t status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		/*
		 * This cannot happen for unsol els completion. This can
		 * only happen when abort for an unsol els completes.
		 * This condition indicates a firmware bug.
		 */
		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	if (status == 5) {
		/*
		 * When an unsolicited els is aborted, the abort is done
		 * by a ELSPT iocb with abort control. This is the aborted IOCB
		 * and not the abortee. We will do the cleanup when the
		 * IOCB which caused the abort, returns.
		 */
		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		/*
		 * Now why would this happen ???
		 */
		(void) snprintf(info, 160,
		    "qlt_handle_unsol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * This is the same case as "if (status == 5)" above. The
		 * only difference is that in this case the firmware actually
		 * finished sending the response. So the abort attempt will
		 * come back with status ?. We will handle it there.
		 */
		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
		    "abort it");
		return;
	}

	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	if (status == 0) {
		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_response_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}

/*
 * Completion of the abort (termination) of an unsolicited ELS.  The
 * final command disposition is reported to FCT through
 * fct_cmd_fca_aborted().
 */
static void
qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[160];
	fct_cmd_t *cmd;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint32_t subcode1, subcode2;
	uint16_t status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		if ((status != 0) && (status != 8)) {
			/*
			 * There could be exchange resource leakage, so
			 * throw HBA fatal error event now
			 */
			(void) snprintf(info, 160,
			    "qlt_handle_unsol_els_abort_completion: "
			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
			    hndl, status, subcode1, subcode2, (void *)rsp);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
			return;
		}

		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		/*
		 * Why would this happen ??
		 */
		(void) snprintf(info, 160,
		    "qlt_handle_unsol_els_abort_completion: can not get "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);

	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	/* status 8 means the exchange was not found by the firmware */
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 8) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}

/*
 * Completion of a solicited ELS.  On success the ELS response is
 * copied out of the shared DMA buffer into els->els_resp_payload; the
 * outcome is reported to FCT via fct_send_cmd_done().
 */
static void
qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[160];
	fct_cmd_t *cmd;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint32_t subcode1, subcode2;
	uint16_t status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		/*
		 * This cannot happen for sol els completion.
		 */
		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		(void) snprintf(info, 160,
		    "qlt_handle_sol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when the ABORT IO IOCB returns.
		 */
		return;
	}

	if (qcmd->dbuf != NULL) {
		if (status == 0) {
			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
			    qcmd->param.resp_offset,
			    els->els_resp_payload, els->els_resp_size);
		}
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}

/*
 * Completion of a solicited CT pass-through.  Mirrors
 * qlt_handle_sol_els_completion(): copies the CT response out of the
 * shared DMA buffer and reports the outcome via fct_send_cmd_done().
 */
static void
qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t *cmd;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	uint32_t hndl;
	uint16_t status;
	char info[160];

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(hndl)) {
		/*
		 * Solicited commands will always have a valid handle.
		 */
		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		(void) snprintf(info, 160,
		    "qlt_handle_ct_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when ABORT IO IOCB returns;
		 */
		return;
	}

	ASSERT(qcmd->dbuf);
	if (status == 0) {
		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
		    qcmd->param.resp_offset,
		    ct->ct_resp_payload, ct->ct_resp_size);
	}
	qlt_dmem_free(NULL, qcmd->dbuf);
	qcmd->dbuf = NULL;

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}

/*
 * Completion of a CTIO (SCSI data transfer and/or status) IOCB.
 * Depending on the IOCB flags and handle, the completion is routed to
 * FCT as a data-transfer done, a response done, or an abort done.
 */
static void
qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t *cmd;
	scsi_task_t *task;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *dbuf;
	fct_status_t fc_st;
	uint32_t iof = 0;
	uint32_t hndl;
	uint16_t status;
	uint16_t flags;
	uint8_t abort_req;
	uint8_t n;
	char info[160];

	/* XXX: Check validity of the IOCB by checking 4th byte. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	flags = QMEM_RD16(qlt, rsp+0x1a);
	/* rsp[2]: buffer handle; BIT_7 set means no data buffer involved */
	n = rsp[2];

	if (!CMD_HANDLE_VALID(hndl)) {
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		QLT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
		    "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
		if ((status != 1) && (status != 2)) {
			/*
			 * There could be exchange resource leakage, so
			 * throw HBA fatal error event now
			 */
			(void) snprintf(info, 160,
			    "qlt_handle_ctio_completion: hndl-"
			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		}

		return;
	}

	/* BIT_14 in the flags marks this as the completion of an abort */
	if (flags & BIT_14) {
		abort_req = 1;
		QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
		    "abort: hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
	} else {
		abort_req = 0;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		(void) snprintf(info, 160,
		    "qlt_handle_ctio_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* Free the mode-2 FCP_RSP IU buffer allocated when status was sent */
	if (qcmd->dbuf_rsp_iu) {
		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
		qcmd->dbuf_rsp_iu = NULL;
	}

	/* status 1 or 2 is a successful CTIO completion */
	if ((status == 1) || (status == 2)) {
		if (abort_req) {
			fc_st = FCT_ABORT_SUCCESS;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = FCT_SUCCESS;
			if (flags & BIT_15) {
				iof = FCT_IOF_FCA_DONE;
			}
		}
	} else {
		if ((status == 8) && abort_req) {
			fc_st = FCT_NOT_FOUND;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
		}
	}
	dbuf = NULL;
	if (((n & BIT_7) == 0) && (!abort_req)) {
		/* A completion of data xfer */
		if (n == 0) {
			dbuf = qcmd->dbuf;
		} else {
			dbuf = stmf_handle_to_buf(task, n);
		}

		ASSERT(dbuf != NULL);
		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
		if (flags & BIT_15) {
			dbuf->db_flags |= DB_STATUS_GOOD_SENT;
		}

		dbuf->db_xfer_status = fc_st;
		fct_scsi_data_xfer_done(cmd, dbuf, iof);
		return;
	}
	if (!abort_req) {
		/*
		 * This was just a pure status xfer.
		 */
		fct_send_response_done(cmd, fc_st, iof);
		return;
	}

	fct_cmd_fca_aborted(cmd, fc_st, iof);
}

/*
 * Completion of an ABORT IO IOCB issued for a solicited ELS/CT
 * command by qlt_abort_sol_cmd().
 */
static void
qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char info[80];
	fct_cmd_t *cmd;
	qlt_cmd_t *qcmd;
	uint32_t h;
	uint16_t status;

	h = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(h)) {
		/*
		 * Solicited commands always have a valid handle.
		 */
		(void) snprintf(info, 80,
		    "qlt_handle_sol_abort_completion: hndl-"
		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}
	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
	if (cmd == NULL) {
		/*
		 * What happened to the cmd ??
		 */
		(void) snprintf(info, 80,
		    "qlt_handle_sol_abort_completion: cannot "
		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
		    (void *)rsp);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
	    (cmd->cmd_type == FCT_CMD_SOL_CT));
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 0x31) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}

/*
 * Handle a received ABTS.  Allocates an fct_cmd, saves the raw IOCB
 * (needed later by qlt_send_abts_response()) and posts the command to
 * FCT.  If allocation fails the port is shut down, as the exchange
 * cannot be answered.
 */
static void
qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
{
	qlt_abts_cmd_t *qcmd;
	fct_cmd_t *cmd;
	uint32_t remote_portid;
	char info[160];

	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
	    sizeof (qlt_abts_cmd_t), 0);
	if (cmd == NULL) {
		(void) snprintf(info, 160,
		    "qlt_handle_rcvd_abts: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	/* Clear bytes 0xC-0xE before saving the IOCB for the response */
	resp[0xC] = resp[0xD] = resp[0xE] = 0;
	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
	bcopy(resp, qcmd->buf, IOCB_SIZE);
	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
	if (cmd->cmd_rp_handle == 0xFFFF)
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;

	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	fct_post_rcvd_cmd(cmd, 0);
}

/*
 * Completion of an ABTS response (type 0x55) IOCB.  Status 0 and 5
 * are accepted silently; anything else is treated as fatal for the
 * port.
 */
static void
qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
{
	uint16_t status;
	char info[80];

	status = QMEM_RD16(qlt, resp+8);

	if ((status == 0) || (status == 5)) {
		return;
	}
	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
	info[79] = 0;
	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
}

#ifdef DEBUG
/* Fault injection: while nonzero, silently drop this many abort requests */
uint32_t qlt_drop_abort_counter = 0;
#endif

/*
 * FCA entry point to abort a command; dispatches on the command type.
 * Returns FCT_NOT_FOUND if the port is (going) offline.
 */
fct_status_t
qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
{
	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;

	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
		return (FCT_NOT_FOUND);
	}

#ifdef DEBUG
	if (qlt_drop_abort_counter > 0) {
		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
			return (FCT_SUCCESS);
	}
#endif

	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
	}

	if (flags & FCT_IOF_FORCE_FCA_DONE) {
		cmd->cmd_handle = 0;
	}

	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
		return (qlt_send_abts_response(qlt, cmd, 1));
	}

	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
		return (qlt_abort_purex(qlt, cmd));
	}

	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
		return (qlt_abort_sol_cmd(qlt, cmd));
	}

	ASSERT(0);
	return (FCT_FAILURE);
}

/*
 * Abort a solicited ELS/CT by issuing an ABORT IO (type 0x33) IOCB.
 * The final disposition arrives in qlt_handle_sol_abort_completion().
 */
fct_status_t
qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t
*cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;

	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags |= QLT_CMD_ABORTING;
	QLT_LOG(qlt->qlt_port_alias, "qlt_abort_sol_cmd: fctcmd-%p, "
	    "cmd_handle-%x", cmd, cmd->cmd_handle);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x33; req[1] = 1;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	if (cmd->cmd_rp) {
		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	} else {
		QMEM_WR16(qlt, req+8, 0xFFFF);
	}

	/* Handle of the command being aborted */
	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}

/*
 * Abort an unsolicited ELS by sending an ELS pass-through (type 0x53)
 * IOCB with terminate-exchange control (req1f = 0x60).
 */
fct_status_t
qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;
	fct_els_t *els;
	uint8_t elsop, req1f;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	elsop = els->els_req_payload[0];
	QLT_LOG(qlt->qlt_port_alias,
	    "qlt_abort_purex: fctcmd-%p, cmd_handle-%x, "
	    "elsop-%x", cmd, cmd->cmd_handle, elsop);
	req1f = 0x60;	/* Terminate xchg */
	/* Session-affecting ELS ops get an extra control flag */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f |= BIT_4;
	}

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}

	qcmd->flags |= QLT_CMD_ABORTING;
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp) {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	} else {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
	}

	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}

/*
 * Abort an unsolicited SCSI exchange with a CTIO (type 0x12) carrying
 * the terminate-exchange flag (BIT_14).
 */
fct_status_t
qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req;
	uint16_t flags;

	flags = BIT_14 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
	QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_abort_unsol_scsi_cmd: fctcmd-%p, "
	    "cmd_handle-%x", cmd, cmd->cmd_handle);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}

	qcmd->flags |= QLT_CMD_ABORTING;
	bzero(req, IOCB_SIZE);
	req[0] = 0x12; req[1] = 0x1;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16(qlt, req+0x1A, flags);
	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}

/*
 * FCA entry point to send a solicited command; dispatches on type.
 */
fct_status_t
qlt_send_cmd(fct_cmd_t *cmd)
{
	qlt_state_t *qlt;

	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
		return (qlt_send_els(qlt, cmd));
	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
		return (qlt_send_ct(qlt, cmd));
	}

	ASSERT(0);
	return (FCT_FAILURE);
}

/*
 * Send a solicited ELS using an ELS pass-through (type 0x53) IOCB.
 * Request and response share one DMA buffer; the response area starts
 * at the 8-byte-aligned offset param.resp_offset.
 */
fct_status_t
qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t
/*
 * Send a solicited ELS request.  The request and response payloads share
 * a single DMA buffer: the request at offset 0 and the response at the
 * next 8-byte boundary after it (saved in qcmd->param.resp_offset so the
 * completion path can locate it).  Builds an entry type 0x53 IOCB with
 * one DSD for the request and one for the response.
 *
 * Returns FCT_BUSY when the DMA buffer or a request-queue entry cannot
 * be obtained; FCT_SUCCESS once the IOCB has been queued.
 */
fct_status_t
qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response starts at the 8-byte-aligned end of the request */
	qcmd->param.resp_offset = (els->els_req_size + 7) & ~7;
	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
	    els->els_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		/* Queue full: release the DMA buffer and let caller retry */
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1;	/* entry type 0x53, entry count 1 */
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	/*
	 * NOTE(review): offsets 0xC and 0x14 appear to be the response and
	 * command DSD counts (one each, matching the single DSDs written at
	 * 0x34 and 0x28 below) -- confirm against the IOCB layout spec.
	 */
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
	QMEM_WR16(qlt, (&req[0x14]), 1);
	req[0x16] = els->els_req_payload[0];	/* ELS opcode */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		/*
		 * In point-to-point topology the IOCB carries our local
		 * port id; note the unusual byte ordering expected by the
		 * firmware.
		 */
		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
		req[0x1c] = cmd->cmd_lportid & 0xff;
		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
	}
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
	    qcmd->param.resp_offset);
	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
/*
 * Send a solicited CT (common transport / name server) request.  Mirrors
 * qlt_send_els: one DMA buffer holds the request at offset 0 and the
 * response at the next 8-byte boundary, and an entry type 0x29 IOCB is
 * built with one command DSD and one response DSD.
 *
 * Returns FCT_BUSY when the DMA buffer or a request-queue entry cannot
 * be obtained; FCT_SUCCESS once the IOCB has been queued.
 */
fct_status_t
qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response starts at the 8-byte-aligned end of the request */
	qcmd->param.resp_offset = (ct->ct_req_size + 7) & ~7;
	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
	    ct->ct_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		/* Queue full: release the DMA buffer and let caller retry */
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x29; req[1] = 1;	/* entry type 0x29, entry count 1 */
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
	QMEM_WR16(qlt, (&req[0x14]), 1);

	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);

	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);	/* COMMAND DSD */
	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
	    qcmd->param.resp_offset);	/* RESPONSE DSD */
	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);

	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci) 4375 { 4376 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private; 4377 int i; 4378 int retries; 4379 int n, size_left; 4380 char c = ' '; 4381 uint32_t addr, endaddr, words_to_read; 4382 caddr_t buf; 4383 4384 mutex_enter(&qlt->qlt_ioctl_lock); 4385 /* 4386 * To make sure that there's no outstanding dumping task 4387 */ 4388 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) { 4389 mutex_exit(&qlt->qlt_ioctl_lock); 4390 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: outstanding"); 4391 return (FCT_FAILURE); 4392 } 4393 4394 /* 4395 * To make sure not to overwrite existing dump 4396 */ 4397 if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) && 4398 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) && 4399 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) { 4400 /* 4401 * If we have alreay one dump, but it's not triggered by user 4402 * and the user hasn't fetched it, we shouldn't dump again. 4403 */ 4404 mutex_exit(&qlt->qlt_ioctl_lock); 4405 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: There's one " 4406 "dump, please fetech it"); 4407 cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there " 4408 "is one already outstanding.", qlt->instance); 4409 return (FCT_FAILURE); 4410 } 4411 qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS; 4412 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) { 4413 qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER; 4414 } else { 4415 qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER; 4416 } 4417 mutex_exit(&qlt->qlt_ioctl_lock); 4418 4419 size_left = QLT_FWDUMP_BUFSIZE; 4420 if (!qlt->qlt_fwdump_buf) { 4421 ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)); 4422 /* 4423 * It's the only place that we allocate buf for dumping. After 4424 * it's allocated, we will use it until the port is detached. 
4425 */ 4426 qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP); 4427 } 4428 4429 /* 4430 * Start to dump firmware 4431 */ 4432 buf = (caddr_t)qlt->qlt_fwdump_buf; 4433 4434 /* 4435 * Print the ISP firmware revision number and attributes information 4436 * Read the RISC to Host Status register 4437 */ 4438 n = snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d " 4439 "Attributes %04x\n\nR2H Status Register\n%08x", 4440 qlt->fw_major, qlt->fw_minor, 4441 qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, 0x44)); 4442 buf += n; size_left -= n; 4443 4444 /* 4445 * Before pausing the RISC, make sure no mailbox can execute 4446 */ 4447 mutex_enter(&qlt->mbox_lock); 4448 if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) { 4449 /* 4450 * Wait to grab the mailboxes 4451 */ 4452 for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) && 4453 (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) { 4454 (void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, 4455 ddi_get_lbolt() + drv_usectohz(1000000)); 4456 if (retries > 5) { 4457 mutex_exit(&qlt->mbox_lock); 4458 QLT_LOG(qlt->qlt_port_alias, 4459 "qlt_firmware_dump: " 4460 "can't drain out mailbox commands"); 4461 goto dump_fail; 4462 } 4463 } 4464 qlt->mbox_io_state = MBOX_STATE_UNKNOWN; 4465 cv_broadcast(&qlt->mbox_cv); 4466 } 4467 mutex_exit(&qlt->mbox_lock); 4468 4469 /* 4470 * Pause the RISC processor 4471 */ 4472 REG_WR32(qlt, REG_HCCR, 0x30000000); 4473 4474 /* 4475 * Wait for the RISC processor to pause 4476 */ 4477 for (i = 0; i < 200; i++) { 4478 if (REG_RD32(qlt, 0x44) & 0x100) { 4479 break; 4480 } 4481 drv_usecwait(1000); 4482 } 4483 if (i == 200) { 4484 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: can't pause"); 4485 return (FCT_FAILURE); 4486 } 4487 4488 if (!qlt->qlt_25xx_chip) { 4489 goto over_25xx_specific_dump; 4490 } 4491 n = snprintf(buf, size_left, "\n\nHostRisc registers\n"); 4492 buf += n; size_left -= n; 4493 REG_WR32(qlt, 0x54, 0x7000); 4494 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, 
size_left); 4495 buf += n; size_left -= n; 4496 REG_WR32(qlt, 0x54, 0x7010); 4497 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4498 buf += n; size_left -= n; 4499 REG_WR32(qlt, 0x54, 0x7C00); 4500 4501 n = snprintf(buf, size_left, "\nPCIe registers\n"); 4502 buf += n; size_left -= n; 4503 REG_WR32(qlt, 0xC0, 0x1); 4504 n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left); 4505 buf += n; size_left -= n; 4506 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left); 4507 buf += n; size_left -= n; 4508 REG_WR32(qlt, 0xC0, 0x0); 4509 4510 over_25xx_specific_dump:; 4511 n = snprintf(buf, size_left, "\n\nHost Interface Registers\n"); 4512 buf += n; size_left -= n; 4513 /* 4514 * Capture data from 32 regsiters 4515 */ 4516 n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left); 4517 buf += n; size_left -= n; 4518 4519 /* 4520 * Disable interrupts 4521 */ 4522 REG_WR32(qlt, 0xc, 0); 4523 4524 /* 4525 * Shadow registers 4526 */ 4527 n = snprintf(buf, size_left, "\nShadow Registers\n"); 4528 buf += n; size_left -= n; 4529 4530 REG_WR32(qlt, 0x54, 0xF70); 4531 addr = 0xb0000000; 4532 for (i = 0; i < 0xb; i++) { 4533 if ((!qlt->qlt_25xx_chip) && (i >= 7)) { 4534 break; 4535 } 4536 if (i && ((i & 7) == 0)) { 4537 n = snprintf(buf, size_left, "\n"); 4538 buf += n; size_left -= n; 4539 } 4540 REG_WR32(qlt, 0xF0, addr); 4541 n = snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC)); 4542 buf += n; size_left -= n; 4543 addr += 0x100000; 4544 } 4545 4546 if (qlt->qlt_25xx_chip) { 4547 REG_WR32(qlt, 0x54, 0x10); 4548 n = snprintf(buf, size_left, "\n\nRISC IO Register\n%08x", 4549 REG_RD32(qlt, 0xC0)); 4550 buf += n; size_left -= n; 4551 } 4552 4553 /* 4554 * Mailbox registers 4555 */ 4556 n = snprintf(buf, size_left, "\n\nMailbox Registers\n"); 4557 buf += n; size_left -= n; 4558 for (i = 0; i < 32; i += 2) { 4559 if ((i + 2) & 15) { 4560 c = ' '; 4561 } else { 4562 c = '\n'; 4563 } 4564 n = snprintf(buf, size_left, "%04x %04x%c", 4565 REG_RD16(qlt, 0x80 + (i << 1)), 4566 
REG_RD16(qlt, 0x80 + ((i+1) << 1)), c); 4567 buf += n; size_left -= n; 4568 } 4569 4570 /* 4571 * Transfer sequence registers 4572 */ 4573 n = snprintf(buf, size_left, "\nXSEQ GP Registers\n"); 4574 buf += n; size_left -= n; 4575 4576 REG_WR32(qlt, 0x54, 0xBF00); 4577 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4578 buf += n; size_left -= n; 4579 REG_WR32(qlt, 0x54, 0xBF10); 4580 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4581 buf += n; size_left -= n; 4582 REG_WR32(qlt, 0x54, 0xBF20); 4583 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4584 buf += n; size_left -= n; 4585 REG_WR32(qlt, 0x54, 0xBF30); 4586 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4587 buf += n; size_left -= n; 4588 REG_WR32(qlt, 0x54, 0xBF40); 4589 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4590 buf += n; size_left -= n; 4591 REG_WR32(qlt, 0x54, 0xBF50); 4592 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4593 buf += n; size_left -= n; 4594 REG_WR32(qlt, 0x54, 0xBF60); 4595 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4596 buf += n; size_left -= n; 4597 REG_WR32(qlt, 0x54, 0xBF70); 4598 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4599 buf += n; size_left -= n; 4600 n = snprintf(buf, size_left, "\nXSEQ-0 registers\n"); 4601 buf += n; size_left -= n; 4602 REG_WR32(qlt, 0x54, 0xBFE0); 4603 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4604 buf += n; size_left -= n; 4605 n = snprintf(buf, size_left, "\nXSEQ-1 registers\n"); 4606 buf += n; size_left -= n; 4607 REG_WR32(qlt, 0x54, 0xBFF0); 4608 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4609 buf += n; size_left -= n; 4610 4611 /* 4612 * Receive sequence registers 4613 */ 4614 n = snprintf(buf, size_left, "\nRSEQ GP Registers\n"); 4615 buf += n; size_left -= n; 4616 REG_WR32(qlt, 0x54, 0xFF00); 4617 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4618 buf += n; size_left -= n; 4619 REG_WR32(qlt, 0x54, 0xFF10); 4620 n = 
qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4621 buf += n; size_left -= n; 4622 REG_WR32(qlt, 0x54, 0xFF20); 4623 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4624 buf += n; size_left -= n; 4625 REG_WR32(qlt, 0x54, 0xFF30); 4626 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4627 buf += n; size_left -= n; 4628 REG_WR32(qlt, 0x54, 0xFF40); 4629 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4630 buf += n; size_left -= n; 4631 REG_WR32(qlt, 0x54, 0xFF50); 4632 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4633 buf += n; size_left -= n; 4634 REG_WR32(qlt, 0x54, 0xFF60); 4635 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4636 buf += n; size_left -= n; 4637 REG_WR32(qlt, 0x54, 0xFF70); 4638 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4639 buf += n; size_left -= n; 4640 n = snprintf(buf, size_left, "\nRSEQ-0 registers\n"); 4641 buf += n; size_left -= n; 4642 REG_WR32(qlt, 0x54, 0xFFD0); 4643 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4644 buf += n; size_left -= n; 4645 n = snprintf(buf, size_left, "\nRSEQ-1 registers\n"); 4646 buf += n; size_left -= n; 4647 REG_WR32(qlt, 0x54, 0xFFE0); 4648 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4649 buf += n; size_left -= n; 4650 n = snprintf(buf, size_left, "\nRSEQ-2 registers\n"); 4651 buf += n; size_left -= n; 4652 REG_WR32(qlt, 0x54, 0xFFF0); 4653 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4654 buf += n; size_left -= n; 4655 4656 if (!qlt->qlt_25xx_chip) 4657 goto over_aseq_regs; 4658 4659 /* 4660 * Auxiliary sequencer registers 4661 */ 4662 n = snprintf(buf, size_left, "\nASEQ GP Registers\n"); 4663 buf += n; size_left -= n; 4664 REG_WR32(qlt, 0x54, 0xB000); 4665 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4666 buf += n; size_left -= n; 4667 REG_WR32(qlt, 0x54, 0xB010); 4668 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4669 buf += n; size_left -= n; 4670 REG_WR32(qlt, 0x54, 0xB020); 4671 
n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4672 buf += n; size_left -= n; 4673 REG_WR32(qlt, 0x54, 0xB030); 4674 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4675 buf += n; size_left -= n; 4676 REG_WR32(qlt, 0x54, 0xB040); 4677 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4678 buf += n; size_left -= n; 4679 REG_WR32(qlt, 0x54, 0xB050); 4680 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4681 buf += n; size_left -= n; 4682 REG_WR32(qlt, 0x54, 0xB060); 4683 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4684 buf += n; size_left -= n; 4685 REG_WR32(qlt, 0x54, 0xB070); 4686 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4687 buf += n; size_left -= n; 4688 n = snprintf(buf, size_left, "\nASEQ-0 registers\n"); 4689 buf += n; size_left -= n; 4690 REG_WR32(qlt, 0x54, 0xB0C0); 4691 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4692 buf += n; size_left -= n; 4693 REG_WR32(qlt, 0x54, 0xB0D0); 4694 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4695 buf += n; size_left -= n; 4696 n = snprintf(buf, size_left, "\nASEQ-1 registers\n"); 4697 buf += n; size_left -= n; 4698 REG_WR32(qlt, 0x54, 0xB0E0); 4699 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4700 buf += n; size_left -= n; 4701 n = snprintf(buf, size_left, "\nASEQ-2 registers\n"); 4702 buf += n; size_left -= n; 4703 REG_WR32(qlt, 0x54, 0xB0F0); 4704 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4705 buf += n; size_left -= n; 4706 4707 over_aseq_regs:; 4708 4709 /* 4710 * Command DMA registers 4711 */ 4712 n = snprintf(buf, size_left, "\nCommand DMA registers\n"); 4713 buf += n; size_left -= n; 4714 REG_WR32(qlt, 0x54, 0x7100); 4715 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4716 buf += n; size_left -= n; 4717 4718 /* 4719 * Queues 4720 */ 4721 n = snprintf(buf, size_left, 4722 "\nRequest0 Queue DMA Channel registers\n"); 4723 buf += n; size_left -= n; 4724 REG_WR32(qlt, 0x54, 0x7200); 4725 n = 
qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left); 4726 buf += n; size_left -= n; 4727 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left); 4728 buf += n; size_left -= n; 4729 4730 n = snprintf(buf, size_left, 4731 "\n\nResponse0 Queue DMA Channel registers\n"); 4732 buf += n; size_left -= n; 4733 REG_WR32(qlt, 0x54, 0x7300); 4734 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left); 4735 buf += n; size_left -= n; 4736 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left); 4737 buf += n; size_left -= n; 4738 4739 n = snprintf(buf, size_left, 4740 "\n\nRequest1 Queue DMA Channel registers\n"); 4741 buf += n; size_left -= n; 4742 REG_WR32(qlt, 0x54, 0x7400); 4743 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left); 4744 buf += n; size_left -= n; 4745 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left); 4746 buf += n; size_left -= n; 4747 4748 /* 4749 * Transmit DMA registers 4750 */ 4751 n = snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n"); 4752 buf += n; size_left -= n; 4753 REG_WR32(qlt, 0x54, 0x7600); 4754 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4755 buf += n; size_left -= n; 4756 REG_WR32(qlt, 0x54, 0x7610); 4757 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4758 buf += n; size_left -= n; 4759 n = snprintf(buf, size_left, "\nXMT1 Data DMA registers\n"); 4760 buf += n; size_left -= n; 4761 REG_WR32(qlt, 0x54, 0x7620); 4762 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4763 buf += n; size_left -= n; 4764 REG_WR32(qlt, 0x54, 0x7630); 4765 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4766 buf += n; size_left -= n; 4767 n = snprintf(buf, size_left, "\nXMT2 Data DMA registers\n"); 4768 buf += n; size_left -= n; 4769 REG_WR32(qlt, 0x54, 0x7640); 4770 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4771 buf += n; size_left -= n; 4772 REG_WR32(qlt, 0x54, 0x7650); 4773 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4774 buf += n; size_left -= n; 4775 n = snprintf(buf, size_left, 
"\nXMT3 Data DMA registers\n"); 4776 buf += n; size_left -= n; 4777 REG_WR32(qlt, 0x54, 0x7660); 4778 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4779 buf += n; size_left -= n; 4780 REG_WR32(qlt, 0x54, 0x7670); 4781 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4782 buf += n; size_left -= n; 4783 n = snprintf(buf, size_left, "\nXMT4 Data DMA registers\n"); 4784 buf += n; size_left -= n; 4785 REG_WR32(qlt, 0x54, 0x7680); 4786 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4787 buf += n; size_left -= n; 4788 REG_WR32(qlt, 0x54, 0x7690); 4789 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4790 buf += n; size_left -= n; 4791 n = snprintf(buf, size_left, "\nXMT Data DMA Common registers\n"); 4792 buf += n; size_left -= n; 4793 REG_WR32(qlt, 0x54, 0x76A0); 4794 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4795 buf += n; size_left -= n; 4796 4797 /* 4798 * Receive DMA registers 4799 */ 4800 n = snprintf(buf, size_left, "\nRCV Thread 0 Data DMA registers\n"); 4801 buf += n; size_left -= n; 4802 REG_WR32(qlt, 0x54, 0x7700); 4803 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4804 buf += n; size_left -= n; 4805 REG_WR32(qlt, 0x54, 0x7710); 4806 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4807 buf += n; size_left -= n; 4808 n = snprintf(buf, size_left, "\nRCV Thread 1 Data DMA registers\n"); 4809 buf += n; size_left -= n; 4810 REG_WR32(qlt, 0x54, 0x7720); 4811 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4812 buf += n; size_left -= n; 4813 REG_WR32(qlt, 0x54, 0x7730); 4814 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4815 buf += n; size_left -= n; 4816 4817 /* 4818 * RISC registers 4819 */ 4820 n = snprintf(buf, size_left, "\nRISC GP registers\n"); 4821 buf += n; size_left -= n; 4822 REG_WR32(qlt, 0x54, 0x0F00); 4823 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4824 buf += n; size_left -= n; 4825 REG_WR32(qlt, 0x54, 0x0F10); 4826 n = qlt_fwdump_dump_regs(qlt, 
buf, 0xc0, 16, size_left); 4827 buf += n; size_left -= n; 4828 REG_WR32(qlt, 0x54, 0x0F20); 4829 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4830 buf += n; size_left -= n; 4831 REG_WR32(qlt, 0x54, 0x0F30); 4832 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4833 buf += n; size_left -= n; 4834 REG_WR32(qlt, 0x54, 0x0F40); 4835 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4836 buf += n; size_left -= n; 4837 REG_WR32(qlt, 0x54, 0x0F50); 4838 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4839 buf += n; size_left -= n; 4840 REG_WR32(qlt, 0x54, 0x0F60); 4841 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4842 buf += n; size_left -= n; 4843 REG_WR32(qlt, 0x54, 0x0F70); 4844 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4845 buf += n; size_left -= n; 4846 4847 /* 4848 * Local memory controller registers 4849 */ 4850 n = snprintf(buf, size_left, "\nLMC registers\n"); 4851 buf += n; size_left -= n; 4852 REG_WR32(qlt, 0x54, 0x3000); 4853 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4854 buf += n; size_left -= n; 4855 REG_WR32(qlt, 0x54, 0x3010); 4856 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4857 buf += n; size_left -= n; 4858 REG_WR32(qlt, 0x54, 0x3020); 4859 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4860 buf += n; size_left -= n; 4861 REG_WR32(qlt, 0x54, 0x3030); 4862 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4863 buf += n; size_left -= n; 4864 REG_WR32(qlt, 0x54, 0x3040); 4865 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4866 buf += n; size_left -= n; 4867 REG_WR32(qlt, 0x54, 0x3050); 4868 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4869 buf += n; size_left -= n; 4870 REG_WR32(qlt, 0x54, 0x3060); 4871 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4872 buf += n; size_left -= n; 4873 4874 if (qlt->qlt_25xx_chip) { 4875 REG_WR32(qlt, 0x54, 0x3070); 4876 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4877 buf 
+= n; size_left -= n; 4878 } 4879 4880 /* 4881 * Fibre protocol module regsiters 4882 */ 4883 n = snprintf(buf, size_left, "\nFPM hardware registers\n"); 4884 buf += n; size_left -= n; 4885 REG_WR32(qlt, 0x54, 0x4000); 4886 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4887 buf += n; size_left -= n; 4888 REG_WR32(qlt, 0x54, 0x4010); 4889 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4890 buf += n; size_left -= n; 4891 REG_WR32(qlt, 0x54, 0x4020); 4892 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4893 buf += n; size_left -= n; 4894 REG_WR32(qlt, 0x54, 0x4030); 4895 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4896 buf += n; size_left -= n; 4897 REG_WR32(qlt, 0x54, 0x4040); 4898 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4899 buf += n; size_left -= n; 4900 REG_WR32(qlt, 0x54, 0x4050); 4901 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4902 buf += n; size_left -= n; 4903 REG_WR32(qlt, 0x54, 0x4060); 4904 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4905 buf += n; size_left -= n; 4906 REG_WR32(qlt, 0x54, 0x4070); 4907 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4908 buf += n; size_left -= n; 4909 REG_WR32(qlt, 0x54, 0x4080); 4910 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4911 buf += n; size_left -= n; 4912 REG_WR32(qlt, 0x54, 0x4090); 4913 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4914 buf += n; size_left -= n; 4915 REG_WR32(qlt, 0x54, 0x40A0); 4916 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4917 buf += n; size_left -= n; 4918 REG_WR32(qlt, 0x54, 0x40B0); 4919 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4920 buf += n; size_left -= n; 4921 4922 /* 4923 * Fibre buffer registers 4924 */ 4925 n = snprintf(buf, size_left, "\nFB hardware registers\n"); 4926 buf += n; size_left -= n; 4927 REG_WR32(qlt, 0x54, 0x6000); 4928 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4929 buf += n; size_left -= n; 4930 
REG_WR32(qlt, 0x54, 0x6010); 4931 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4932 buf += n; size_left -= n; 4933 REG_WR32(qlt, 0x54, 0x6020); 4934 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4935 buf += n; size_left -= n; 4936 REG_WR32(qlt, 0x54, 0x6030); 4937 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4938 buf += n; size_left -= n; 4939 REG_WR32(qlt, 0x54, 0x6040); 4940 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4941 buf += n; size_left -= n; 4942 REG_WR32(qlt, 0x54, 0x6100); 4943 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4944 buf += n; size_left -= n; 4945 REG_WR32(qlt, 0x54, 0x6130); 4946 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4947 buf += n; size_left -= n; 4948 REG_WR32(qlt, 0x54, 0x6150); 4949 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4950 buf += n; size_left -= n; 4951 REG_WR32(qlt, 0x54, 0x6170); 4952 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4953 buf += n; size_left -= n; 4954 REG_WR32(qlt, 0x54, 0x6190); 4955 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4956 buf += n; size_left -= n; 4957 REG_WR32(qlt, 0x54, 0x61B0); 4958 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4959 buf += n; size_left -= n; 4960 4961 if (qlt->qlt_25xx_chip) { 4962 REG_WR32(qlt, 0x54, 0x6F00); 4963 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left); 4964 buf += n; size_left -= n; 4965 } 4966 4967 qlt->intr_sneak_counter = 10; 4968 qlt_disable_intr(qlt); 4969 mutex_enter(&qlt->intr_lock); 4970 qlt->qlt_intr_enabled = 0; 4971 (void) qlt_reset_chip_and_download_fw(qlt, 1); 4972 drv_usecwait(20); 4973 qlt->intr_sneak_counter = 0; 4974 mutex_exit(&qlt->intr_lock); 4975 4976 /* 4977 * Memory 4978 */ 4979 n = snprintf(buf, size_left, "\nCode RAM\n"); 4980 buf += n; size_left -= n; 4981 4982 addr = 0x20000; 4983 endaddr = 0x22000; 4984 words_to_read = 0; 4985 while (addr < endaddr) { 4986 words_to_read = MBOX_DMA_MEM_SIZE >> 2; 4987 if 
((words_to_read + addr) > endaddr) { 4988 words_to_read = endaddr - addr; 4989 } 4990 if (qlt_read_risc_ram(qlt, addr, words_to_read) != 4991 QLT_SUCCESS) { 4992 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error " 4993 "reading risc ram - CODE RAM"); 4994 goto dump_fail; 4995 } 4996 4997 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left); 4998 buf += n; size_left -= n; 4999 5000 if (size_left < 100000) { 5001 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run " 5002 "out of space - CODE RAM"); 5003 goto dump_ok; 5004 } 5005 addr += words_to_read; 5006 } 5007 5008 n = snprintf(buf, size_left, "\nExternal Memory\n"); 5009 buf += n; size_left -= n; 5010 5011 addr = 0x100000; 5012 endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo; 5013 endaddr++; 5014 if (endaddr & 7) { 5015 endaddr = (endaddr + 7) & 0xFFFFFFF8; 5016 } 5017 5018 words_to_read = 0; 5019 while (addr < endaddr) { 5020 words_to_read = MBOX_DMA_MEM_SIZE >> 2; 5021 if ((words_to_read + addr) > endaddr) { 5022 words_to_read = endaddr - addr; 5023 } 5024 if (qlt_read_risc_ram(qlt, addr, words_to_read) != 5025 QLT_SUCCESS) { 5026 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error " 5027 "reading risc ram - EXT RAM"); 5028 goto dump_fail; 5029 } 5030 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left); 5031 buf += n; size_left -= n; 5032 if (size_left < 100000) { 5033 QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run " 5034 "out of space - EXT RAM"); 5035 goto dump_ok; 5036 } 5037 addr += words_to_read; 5038 } 5039 5040 /* 5041 * Label the end tag 5042 */ 5043 n = snprintf(buf, size_left, "[<==END] ISP Debug Dump\n"); 5044 buf += n; size_left -= n; 5045 5046 /* 5047 * Queue dumping 5048 */ 5049 n = snprintf(buf, size_left, "\nRequest Queue\n"); 5050 buf += n; size_left -= n; 5051 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET, 5052 REQUEST_QUEUE_ENTRIES, buf, size_left); 5053 buf += n; size_left -= n; 5054 5055 n = snprintf(buf, 
size_left, "\nPriority Queue\n"); 5056 buf += n; size_left -= n; 5057 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET, 5058 PRIORITY_QUEUE_ENTRIES, buf, size_left); 5059 buf += n; size_left -= n; 5060 5061 n = snprintf(buf, size_left, "\nResponse Queue\n"); 5062 buf += n; size_left -= n; 5063 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET, 5064 RESPONSE_QUEUE_ENTRIES, buf, size_left); 5065 buf += n; size_left -= n; 5066 5067 n = snprintf(buf, size_left, "\nATIO queue\n"); 5068 buf += n; size_left -= n; 5069 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET, 5070 ATIO_QUEUE_ENTRIES, buf, size_left); 5071 buf += n; size_left -= n; 5072 5073 /* 5074 * Lable dump reason 5075 */ 5076 n = snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n", 5077 qlt->qlt_port_alias, ssci->st_additional_info); 5078 buf += n; size_left -= n; 5079 5080 dump_ok: 5081 QLT_LOG(qlt->qlt_port_alias, "qlt_fireware_dump: left-%d", size_left); 5082 5083 mutex_enter(&qlt->qlt_ioctl_lock); 5084 qlt->qlt_ioctl_flags &= 5085 ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER); 5086 qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID; 5087 mutex_exit(&qlt->qlt_ioctl_lock); 5088 return (FCT_SUCCESS); 5089 5090 dump_fail: 5091 mutex_enter(&qlt->qlt_ioctl_lock); 5092 qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK; 5093 mutex_exit(&qlt->qlt_ioctl_lock); 5094 return (FCT_FAILURE); 5095 } 5096 5097 static int 5098 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count, 5099 int size_left) 5100 { 5101 int i; 5102 int n; 5103 char c = ' '; 5104 5105 for (i = 0, n = 0; i < count; i++) { 5106 if ((i + 1) & 7) { 5107 c = ' '; 5108 } else { 5109 c = '\n'; 5110 } 5111 n += snprintf(&buf[n], (size_left - n), "%08x%c", 5112 REG_RD32(qlt, startaddr + (i << 2)), c); 5113 } 5114 return (n); 5115 } 5116 5117 static int 5118 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words, 5119 caddr_t buf, int size_left) 5120 { 5121 int i; 5122 int n; 
5123 char c = ' '; 5124 uint32_t *ptr; 5125 5126 ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET); 5127 for (i = 0, n = 0; i < words; i++) { 5128 if ((i & 7) == 0) { 5129 n += snprintf(&buf[n], (size_left - n), "%08x: ", 5130 addr + i); 5131 } 5132 if ((i + 1) & 7) { 5133 c = ' '; 5134 } else { 5135 c = '\n'; 5136 } 5137 n += snprintf(&buf[n], (size_left - n), "%08x%c", ptr[i], c); 5138 } 5139 return (n); 5140 } 5141 5142 static int 5143 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf, 5144 int size_left) 5145 { 5146 int i; 5147 int n; 5148 char c = ' '; 5149 int words; 5150 uint16_t *ptr; 5151 uint16_t w; 5152 5153 words = entries * 32; 5154 ptr = (uint16_t *)qadr; 5155 for (i = 0, n = 0; i < words; i++) { 5156 if ((i & 7) == 0) { 5157 n += snprintf(&buf[n], (size_left - n), "%05x: ", i); 5158 } 5159 if ((i + 1) & 7) { 5160 c = ' '; 5161 } else { 5162 c = '\n'; 5163 } 5164 w = QMEM_RD16(qlt, &ptr[i]); 5165 n += snprintf(&buf[n], (size_left - n), "%04x%c", w, c); 5166 } 5167 return (n); 5168 } 5169 5170 /* 5171 * Only called by debug dump. Interrupts are disabled and mailboxes alongwith 5172 * mailbox ram is available. 
/*
 * Only called by debug dump. Interrupts are disabled and mailboxes alongwith
 * mailbox ram is available.
 * Copy data from RISC RAM to system memory
 *
 * Programs mailbox command 0xc to DMA `words' 32-bit words starting at
 * RISC address `addr' into the mailbox DMA staging area of queue_mem.
 * NOTE(review): 0xc is presumably the firmware's "dump RAM (extended)"
 * mailbox command -- confirm against the ISP24xx/25xx interface spec.
 * On success the staging area is synced for CPU access so the caller
 * (qlt_dump_risc_ram) can read it directly.
 */
static fct_status_t
qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
{
	uint64_t da;
	fct_status_t ret;

	REG_WR16(qlt, REG_MBOX(0), 0xc);	/* mailbox command code */
	da = qlt->queue_mem_cookie.dmac_laddress;
	da += MBOX_DMA_MEM_OFFSET;

	/*
	 * System destination address (64 bits spread over four mailboxes,
	 * low 16 bits first)
	 */
	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);

	/*
	 * Length (in 32-bit words)
	 */
	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));

	/*
	 * RISC source address
	 */
	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));

	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, 0xA0000000);
	if (ret == QLT_SUCCESS) {
		/* Make the DMA'd words visible to the CPU before use */
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
	} else {
		QLT_LOG(qlt->qlt_port_alias, "qlt_read_risc_ram: qlt raw_mbox "
		    "failed 0x%llX", ret);
	}
	return (ret);
}