/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains the module linkage, soft state management and
 * DDI attach/detach entry points for the pmcs SAS HBA driver.
 */
#include <sys/scsi/adapters/pmcs/pmcs.h>

#define	PMCS_DRIVER_VERSION	"pmcs HBA device driver"

static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;

/*
 * Non-DDI Compliant stuff
 */
extern char hw_serial[];

/*
 * Global driver data
 */
/* Soft state anchors for HBA nodes and iport child nodes, respectively. */
void *pmcs_softc_state = NULL;
void *pmcs_iport_softstate = NULL;

/*
 * Tracing and Logging info
 *
 * The trace buffer is shared by all pmcs instances; it is allocated on
 * first HBA attach and freed in _fini().
 */
pmcs_tbuf_t *pmcs_tbuf = NULL;
uint32_t pmcs_tbuf_num_elems = 0;
pmcs_tbuf_t *pmcs_tbuf_ptr;
uint32_t pmcs_tbuf_idx = 0;
boolean_t pmcs_tbuf_wrap = B_FALSE;
static kmutex_t pmcs_trace_lock;	/* protects the shared trace buffer */

/*
 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
 * will also be sent to system log.
 */
int pmcs_force_syslog = 0;
int pmcs_console = 0;

/*
 * External References
 */
extern int ncpus_online;

/*
 * Local static data
 *
 * These are compile-time defaults; most can be overridden per-instance
 * via driver.conf properties in pmcs_attach().
 */
static int fwlog_level = 3;		/* firmware log level; 0 disables */
static int physpeed = PHY_LINK_ALL;
static int phymode = PHY_LM_AUTO;
static int block_mask = 0;
static int phymap_usec = 3 * MICROSEC;	/* phymap stabilization time */
static int iportmap_usec = 2 * MICROSEC; /* iportmap stabilization time */

#ifdef DEBUG
static int debug_mask = 1;
#else
static int debug_mask = 0;
#endif

#ifdef DISABLE_MSIX
static int disable_msix = 1;
#else
static int disable_msix = 0;
#endif

#ifdef DISABLE_MSI
static int disable_msi = 1;
#else
static int disable_msi = 0;
#endif

static uint16_t maxqdepth = 0xfffe;

/*
 * Local prototypes
 */
static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
static int pmcs_unattach(pmcs_hw_t *);
static int pmcs_iport_unattach(pmcs_iport_t *);
static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
static void pmcs_watchdog(void *);
static int pmcs_setup_intr(pmcs_hw_t *);
static int pmcs_teardown_intr(pmcs_hw_t *);

static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
static uint_t pmcs_general_ix(caddr_t, caddr_t);
static uint_t pmcs_event_ix(caddr_t, caddr_t);
static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
static uint_t pmcs_all_intr(caddr_t, caddr_t);
static int pmcs_quiesce(dev_info_t *dip);
static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);

static void pmcs_create_phy_stats(pmcs_iport_t *);
int pmcs_update_phy_stats(kstat_t *, int);
static void pmcs_destroy_phy_stats(pmcs_iport_t *);

static void pmcs_fm_fini(pmcs_hw_t *pwp);
static void pmcs_fm_init(pmcs_hw_t *pwp);
static int pmcs_fm_error_cb(dev_info_t *dip,
    ddi_fm_error_t *err, const void *impl_data);

/*
 * Local configuration data
 */
static struct dev_ops pmcs_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	pmcs_attach,		/* attach */
	pmcs_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	ddi_power,		/* power management */
	pmcs_quiesce		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	PMCS_DRIVER_VERSION,
	&pmcs_ops,	/* driver ops */
};
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Default DMA attributes; copied into the per-purpose attrs at attach. */
const ddi_dma_attr_t pmcs_dattr = {
	DMA_ATTR_V0,			/* dma_attr version */
	0x0000000000000000ull,		/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,		/* dma_attr_count_max */
	0x0000000000000001ull,		/* dma_attr_align */
	0x00000078,			/* dma_attr_burstsizes */
	0x00000001,			/* dma_attr_minxfer */
	0x00000000FFFFFFFFull,		/* dma_attr_maxxfer */
	0x00000000FFFFFFFFull,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	512,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/* Default register/device access attributes (little-endian, strict order). */
static ddi_device_acc_attr_t rattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * Attach/Detach functions
 */

/*
 * _init: loadable module entry point.  Initializes both soft state
 * anchors and registers with the SCSA framework before mod_install.
 * Each failure path unwinds exactly what was set up before it.
 */
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
	if (ret != 0) {
		cmn_err(CE_WARN, "?soft state init failed for pmcs");
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/*
	 * Allocate soft state for iports
	 */
	ret = ddi_soft_state_init(&pmcs_iport_softstate,
	    sizeof (pmcs_iport_t), 2);
	if (ret != 0) {
		cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
202 ddi_soft_state_fini(&pmcs_softc_state); 203 return (ret); 204 } 205 206 ret = mod_install(&modlinkage); 207 if (ret != 0) { 208 cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret); 209 scsi_hba_fini(&modlinkage); 210 ddi_soft_state_fini(&pmcs_iport_softstate); 211 ddi_soft_state_fini(&pmcs_softc_state); 212 return (ret); 213 } 214 215 /* Initialize the global trace lock */ 216 mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL); 217 218 return (0); 219 } 220 221 int 222 _fini(void) 223 { 224 int ret; 225 if ((ret = mod_remove(&modlinkage)) != 0) { 226 return (ret); 227 } 228 scsi_hba_fini(&modlinkage); 229 230 /* Free pmcs log buffer and destroy the global lock */ 231 if (pmcs_tbuf) { 232 kmem_free(pmcs_tbuf, 233 pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t)); 234 pmcs_tbuf = NULL; 235 } 236 mutex_destroy(&pmcs_trace_lock); 237 238 ddi_soft_state_fini(&pmcs_iport_softstate); 239 ddi_soft_state_fini(&pmcs_softc_state); 240 return (0); 241 } 242 243 int 244 _info(struct modinfo *modinfop) 245 { 246 return (mod_info(&modlinkage, modinfop)); 247 } 248 249 static int 250 pmcs_iport_attach(dev_info_t *dip) 251 { 252 pmcs_iport_t *iport; 253 pmcs_hw_t *pwp; 254 scsi_hba_tran_t *tran; 255 void *ua_priv = NULL; 256 char *iport_ua; 257 char *init_port; 258 int hba_inst; 259 int inst; 260 261 hba_inst = ddi_get_instance(ddi_get_parent(dip)); 262 inst = ddi_get_instance(dip); 263 264 pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst); 265 if (pwp == NULL) { 266 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 267 "%s: iport%d attach invoked with NULL parent (HBA) node)", 268 __func__, inst); 269 return (DDI_FAILURE); 270 } 271 272 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) { 273 return (DDI_FAILURE); 274 } 275 276 if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) { 277 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 278 "%s: invoked with NULL unit address, inst (%d)", 279 __func__, inst); 280 return (DDI_FAILURE); 281 } 282 283 if 
(ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) { 284 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 285 "Failed to alloc soft state for iport %d", inst); 286 return (DDI_FAILURE); 287 } 288 289 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 290 if (iport == NULL) { 291 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 292 "cannot get iport soft state"); 293 goto iport_attach_fail1; 294 } 295 296 mutex_init(&iport->lock, NULL, MUTEX_DRIVER, 297 DDI_INTR_PRI(pwp->intr_pri)); 298 cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL); 299 cv_init(&iport->smp_cv, NULL, CV_DEFAULT, NULL); 300 mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER, 301 DDI_INTR_PRI(pwp->intr_pri)); 302 mutex_init(&iport->smp_lock, NULL, MUTEX_DRIVER, 303 DDI_INTR_PRI(pwp->intr_pri)); 304 305 /* Set some data on the iport handle */ 306 iport->dip = dip; 307 iport->pwp = pwp; 308 309 /* Dup the UA into the iport handle */ 310 iport->ua = strdup(iport_ua); 311 312 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 313 tran->tran_hba_private = iport; 314 315 list_create(&iport->phys, sizeof (pmcs_phy_t), 316 offsetof(pmcs_phy_t, list_node)); 317 318 /* 319 * If our unit address is active in the phymap, configure our 320 * iport's phylist. 
321 */ 322 mutex_enter(&iport->lock); 323 ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua); 324 if (ua_priv) { 325 /* Non-NULL private data indicates the unit address is active */ 326 iport->ua_state = UA_ACTIVE; 327 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 328 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 329 "%s: failed to " 330 "configure phys on iport handle (0x%p), " 331 " unit address [%s]", __func__, 332 (void *)iport, iport_ua); 333 mutex_exit(&iport->lock); 334 goto iport_attach_fail2; 335 } 336 } else { 337 iport->ua_state = UA_INACTIVE; 338 } 339 mutex_exit(&iport->lock); 340 341 /* Allocate string-based soft state pool for targets */ 342 iport->tgt_sstate = NULL; 343 if (ddi_soft_state_bystr_init(&iport->tgt_sstate, 344 sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) { 345 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 346 "cannot get iport tgt soft state"); 347 goto iport_attach_fail2; 348 } 349 350 /* Create this iport's target map */ 351 if (pmcs_iport_tgtmap_create(iport) == B_FALSE) { 352 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 353 "Failed to create tgtmap on iport %d", inst); 354 goto iport_attach_fail3; 355 } 356 357 /* Set up the 'initiator-port' DDI property on this iport */ 358 init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP); 359 if (pwp->separate_ports) { 360 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 361 "%s: separate ports not supported", __func__); 362 } else { 363 /* Set initiator-port value to the HBA's base WWN */ 364 (void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1, 365 init_port); 366 } 367 368 mutex_enter(&iport->lock); 369 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING, 370 SCSI_ADDR_PROP_INITIATOR_PORT, init_port); 371 kmem_free(init_port, PMCS_MAX_UA_SIZE); 372 373 /* Set up a 'num-phys' DDI property for the iport node */ 374 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 375 &iport->nphy); 376 mutex_exit(&iport->lock); 377 378 /* Create kstats for each of the phys in this port */ 
379 pmcs_create_phy_stats(iport); 380 381 /* 382 * Insert this iport handle into our list and set 383 * iports_attached on the HBA node. 384 */ 385 rw_enter(&pwp->iports_lock, RW_WRITER); 386 ASSERT(!list_link_active(&iport->list_node)); 387 list_insert_tail(&pwp->iports, iport); 388 pwp->iports_attached = 1; 389 pwp->num_iports++; 390 rw_exit(&pwp->iports_lock); 391 392 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 393 "iport%d attached", inst); 394 ddi_report_dev(dip); 395 return (DDI_SUCCESS); 396 397 /* teardown and fail */ 398 iport_attach_fail3: 399 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 400 iport_attach_fail2: 401 list_destroy(&iport->phys); 402 strfree(iport->ua); 403 mutex_destroy(&iport->refcnt_lock); 404 mutex_destroy(&iport->smp_lock); 405 cv_destroy(&iport->refcnt_cv); 406 cv_destroy(&iport->smp_cv); 407 mutex_destroy(&iport->lock); 408 iport_attach_fail1: 409 ddi_soft_state_free(pmcs_iport_softstate, inst); 410 return (DDI_FAILURE); 411 } 412 413 static int 414 pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 415 { 416 scsi_hba_tran_t *tran; 417 char chiprev, *fwsupport, hw_rev[24], fw_rev[24]; 418 off_t set3size; 419 int inst, i; 420 int sm_hba = 1; 421 int protocol = 0; 422 int num_phys = 0; 423 pmcs_hw_t *pwp; 424 pmcs_phy_t *phyp; 425 uint32_t num_threads; 426 char buf[64]; 427 428 switch (cmd) { 429 case DDI_ATTACH: 430 break; 431 432 case DDI_PM_RESUME: 433 case DDI_RESUME: 434 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 435 if (!tran) { 436 return (DDI_FAILURE); 437 } 438 /* No DDI_?_RESUME on iport nodes */ 439 if (scsi_hba_iport_unit_address(dip) != NULL) { 440 return (DDI_SUCCESS); 441 } 442 pwp = TRAN2PMC(tran); 443 if (pwp == NULL) { 444 return (DDI_FAILURE); 445 } 446 447 mutex_enter(&pwp->lock); 448 pwp->suspended = 0; 449 if (pwp->tq) { 450 ddi_taskq_resume(pwp->tq); 451 } 452 mutex_exit(&pwp->lock); 453 return (DDI_SUCCESS); 454 455 default: 456 return (DDI_FAILURE); 457 } 458 459 /* 460 * If this is an iport 
node, invoke iport attach. 461 */ 462 if (scsi_hba_iport_unit_address(dip) != NULL) { 463 return (pmcs_iport_attach(dip)); 464 } 465 466 /* 467 * From here on is attach for the HBA node 468 */ 469 470 #ifdef DEBUG 471 /* 472 * Check to see if this unit is to be disabled. We can't disable 473 * on a per-iport node. It's either the entire HBA or nothing. 474 */ 475 (void) snprintf(buf, sizeof (buf), 476 "disable-instance-%d", ddi_get_instance(dip)); 477 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 478 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) { 479 cmn_err(CE_NOTE, "pmcs%d: disabled by configuration", 480 ddi_get_instance(dip)); 481 return (DDI_FAILURE); 482 } 483 #endif 484 485 /* 486 * Allocate softstate 487 */ 488 inst = ddi_get_instance(dip); 489 if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) { 490 cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst); 491 return (DDI_FAILURE); 492 } 493 494 pwp = ddi_get_soft_state(pmcs_softc_state, inst); 495 if (pwp == NULL) { 496 cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst); 497 ddi_soft_state_free(pmcs_softc_state, inst); 498 return (DDI_FAILURE); 499 } 500 pwp->dip = dip; 501 STAILQ_INIT(&pwp->dq); 502 STAILQ_INIT(&pwp->cq); 503 STAILQ_INIT(&pwp->wf); 504 STAILQ_INIT(&pwp->pf); 505 /* 506 * Create the list for iports 507 */ 508 list_create(&pwp->iports, sizeof (pmcs_iport_t), 509 offsetof(pmcs_iport_t, list_node)); 510 511 pwp->state = STATE_PROBING; 512 513 /* 514 * Get driver.conf properties 515 */ 516 pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 517 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask", 518 debug_mask); 519 pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 520 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask", 521 block_mask); 522 pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 523 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed); 524 pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 525 DDI_PROP_DONTPASS | 
DDI_PROP_NOTPROM, "pmcs-phymode", phymode); 526 pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 527 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level); 528 if (pwp->fwlog > PMCS_FWLOG_MAX) { 529 pwp->fwlog = PMCS_FWLOG_MAX; 530 } 531 532 mutex_enter(&pmcs_trace_lock); 533 if (pmcs_tbuf == NULL) { 534 /* Allocate trace buffer */ 535 pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 536 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems", 537 PMCS_TBUF_NUM_ELEMS_DEF); 538 if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) || 539 (pmcs_tbuf_num_elems == 0)) { 540 pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF; 541 } 542 543 pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems * 544 sizeof (pmcs_tbuf_t), KM_SLEEP); 545 pmcs_tbuf_ptr = pmcs_tbuf; 546 pmcs_tbuf_idx = 0; 547 } 548 mutex_exit(&pmcs_trace_lock); 549 550 disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 551 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix", 552 disable_msix); 553 disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 554 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi", 555 disable_msi); 556 maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 557 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth); 558 pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 559 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0); 560 if (pwp->fw_force_update == 0) { 561 pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 562 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 563 "pmcs-fw-disable-update", 0); 564 } 565 pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 566 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries", 567 PMCS_NQENTRY); 568 569 /* 570 * Initialize FMA 571 */ 572 pwp->dev_acc_attr = pwp->reg_acc_attr = rattr; 573 pwp->iqp_dma_attr = pwp->oqp_dma_attr = 574 pwp->regdump_dma_attr = pwp->cip_dma_attr = 575 pwp->fwlog_dma_attr = pmcs_dattr; 576 pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip, 577 
DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable", 578 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 579 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 580 pmcs_fm_init(pwp); 581 582 /* 583 * Map registers 584 */ 585 if (pci_config_setup(dip, &pwp->pci_acc_handle)) { 586 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 587 "pci config setup failed"); 588 ddi_soft_state_free(pmcs_softc_state, inst); 589 return (DDI_FAILURE); 590 } 591 592 /* 593 * Get the size of register set 3. 594 */ 595 if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) { 596 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 597 "unable to get size of register set %d", PMCS_REGSET_3); 598 pci_config_teardown(&pwp->pci_acc_handle); 599 ddi_soft_state_free(pmcs_softc_state, inst); 600 return (DDI_FAILURE); 601 } 602 603 /* 604 * Map registers 605 */ 606 pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 607 608 if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs, 609 0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) { 610 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 611 "failed to map Message Unit registers"); 612 pci_config_teardown(&pwp->pci_acc_handle); 613 ddi_soft_state_free(pmcs_softc_state, inst); 614 return (DDI_FAILURE); 615 } 616 617 if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs, 618 0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) { 619 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 620 "failed to map TOP registers"); 621 ddi_regs_map_free(&pwp->msg_acc_handle); 622 pci_config_teardown(&pwp->pci_acc_handle); 623 ddi_soft_state_free(pmcs_softc_state, inst); 624 return (DDI_FAILURE); 625 } 626 627 if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs, 628 0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) { 629 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 630 "failed to map GSM registers"); 631 ddi_regs_map_free(&pwp->top_acc_handle); 632 ddi_regs_map_free(&pwp->msg_acc_handle); 633 pci_config_teardown(&pwp->pci_acc_handle); 634 
ddi_soft_state_free(pmcs_softc_state, inst); 635 return (DDI_FAILURE); 636 } 637 638 if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs, 639 0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) { 640 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 641 "failed to map MPI registers"); 642 ddi_regs_map_free(&pwp->top_acc_handle); 643 ddi_regs_map_free(&pwp->gsm_acc_handle); 644 ddi_regs_map_free(&pwp->msg_acc_handle); 645 pci_config_teardown(&pwp->pci_acc_handle); 646 ddi_soft_state_free(pmcs_softc_state, inst); 647 return (DDI_FAILURE); 648 } 649 pwp->mpibar = 650 (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size; 651 652 /* 653 * Make sure we can support this card. 654 */ 655 pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION); 656 657 switch (pwp->chiprev) { 658 case PMCS_PM8001_REV_A: 659 case PMCS_PM8001_REV_B: 660 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 661 "Rev A/B Card no longer supported"); 662 goto failure; 663 case PMCS_PM8001_REV_C: 664 break; 665 default: 666 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 667 "Unknown chip revision (%d)", pwp->chiprev); 668 goto failure; 669 } 670 671 /* 672 * Allocate DMA addressable area for Inbound and Outbound Queue indices 673 * that the chip needs to access plus a space for scratch usage 674 */ 675 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 676 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls, 677 &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip, 678 &pwp->ciaddr) == B_FALSE) { 679 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 680 "Failed to setup DMA for index/scratch"); 681 goto failure; 682 } 683 684 bzero(pwp->cip, ptob(1)); 685 pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE]; 686 pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE; 687 688 /* 689 * Allocate DMA S/G list chunks 690 */ 691 (void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES); 692 693 /* 694 * Allocate a DMA addressable area for the firmware log (if needed) 695 */ 696 if (pwp->fwlog) { 697 /* 698 * Align 
to event log header and entry size 699 */ 700 pwp->fwlog_dma_attr.dma_attr_align = 32; 701 if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr, 702 &pwp->fwlog_acchdl, 703 &pwp->fwlog_hndl, PMCS_FWLOG_SIZE, 704 (caddr_t *)&pwp->fwlogp, 705 &pwp->fwaddr) == B_FALSE) { 706 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 707 "Failed to setup DMA for fwlog area"); 708 pwp->fwlog = 0; 709 } else { 710 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 711 } 712 } 713 714 if (pwp->flash_chunk_addr == NULL) { 715 pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE; 716 if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr, 717 &pwp->regdump_acchdl, 718 &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE, 719 (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) == 720 B_FALSE) { 721 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 722 "Failed to setup DMA for register dump area"); 723 goto failure; 724 } 725 bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE); 726 } 727 728 /* 729 * More bits of local initialization... 730 */ 731 pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0); 732 if (pwp->tq == NULL) { 733 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 734 "unable to create worker taskq"); 735 goto failure; 736 } 737 738 /* 739 * Cache of structures for dealing with I/O completion callbacks. 
740 */ 741 (void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst); 742 pwp->iocomp_cb_cache = kmem_cache_create(buf, 743 sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0); 744 745 /* 746 * Cache of PHY structures 747 */ 748 (void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst); 749 pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8, 750 pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp, 751 NULL, 0); 752 753 /* 754 * Allocate space for the I/O completion threads 755 */ 756 num_threads = ncpus_online; 757 if (num_threads > PMCS_MAX_CQ_THREADS) { 758 num_threads = PMCS_MAX_CQ_THREADS; 759 } 760 761 pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) * 762 num_threads, KM_SLEEP); 763 pwp->cq_info.cq_threads = num_threads; 764 pwp->cq_info.cq_next_disp_thr = 0; 765 pwp->cq_info.cq_stop = B_FALSE; 766 767 /* 768 * Set the quantum value in clock ticks for the I/O interrupt 769 * coalescing timer. 770 */ 771 pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS); 772 773 /* 774 * We have a delicate dance here. We need to set up 775 * interrupts so we know how to set up some OQC 776 * tables. However, while we're setting up table 777 * access, we may need to flash new firmware and 778 * reset the card, which will take some finessing. 779 */ 780 781 /* 782 * Set up interrupts here. 783 */ 784 switch (pmcs_setup_intr(pwp)) { 785 case 0: 786 break; 787 case EIO: 788 pwp->stuck = 1; 789 /* FALLTHROUGH */ 790 default: 791 goto failure; 792 } 793 794 /* 795 * Set these up now becuase they are used to initialize the OQC tables. 796 * 797 * If we have MSI or MSI-X interrupts set up and we have enough 798 * vectors for each OQ, the Outbound Queue vectors can all be the 799 * same as the appropriate interrupt routine will have been called 800 * and the doorbell register automatically cleared. 
801 * This keeps us from having to check the Outbound Doorbell register 802 * when the routines for these interrupts are called. 803 * 804 * If we have Legacy INT-X interrupts set up or we didn't have enough 805 * MSI/MSI-X vectors to uniquely identify each OQ, we point these 806 * vectors to the bits we would like to have set in the Outbound 807 * Doorbell register because pmcs_all_intr will read the doorbell 808 * register to find out why we have an interrupt and write the 809 * corresponding 'clear' bit for that interrupt. 810 */ 811 812 switch (pwp->intr_cnt) { 813 case 1: 814 /* 815 * Only one vector, so we must check all OQs for MSI. For 816 * INT-X, there's only one vector anyway, so we can just 817 * use the outbound queue bits to keep from having to 818 * check each queue for each interrupt. 819 */ 820 if (pwp->int_type == PMCS_INT_FIXED) { 821 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 822 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 823 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 824 } else { 825 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 826 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE; 827 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE; 828 } 829 break; 830 case 2: 831 /* With 2, we can at least isolate IODONE */ 832 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 833 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 834 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL; 835 break; 836 case 4: 837 /* With 4 vectors, everybody gets one */ 838 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 839 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 840 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 841 break; 842 } 843 844 /* 845 * Do the first part of setup 846 */ 847 if (pmcs_setup(pwp)) { 848 goto failure; 849 } 850 pmcs_report_fwversion(pwp); 851 852 /* 853 * Now do some additonal allocations based upon information 854 * gathered during MPI setup. 
855 */ 856 pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP); 857 ASSERT(pwp->nphy < SAS2_PHYNUM_MAX); 858 phyp = pwp->root_phys; 859 for (i = 0; i < pwp->nphy; i++) { 860 if (i < pwp->nphy-1) { 861 phyp->sibling = (phyp + 1); 862 } 863 mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER, 864 DDI_INTR_PRI(pwp->intr_pri)); 865 phyp->phynum = i & SAS2_PHYNUM_MASK; 866 pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path)); 867 phyp->pwp = pwp; 868 phyp->device_id = PMCS_INVALID_DEVICE_ID; 869 phyp++; 870 } 871 872 pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP); 873 for (i = 0; i < pwp->max_cmd - 1; i++) { 874 pmcwork_t *pwrk = &pwp->work[i]; 875 mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER, 876 DDI_INTR_PRI(pwp->intr_pri)); 877 cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL); 878 STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next); 879 880 } 881 pwp->targets = (pmcs_xscsi_t **) 882 kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP); 883 884 pwp->iqpt = (pmcs_iqp_trace_t *) 885 kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP); 886 pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP); 887 pwp->iqpt->curpos = pwp->iqpt->head; 888 pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE; 889 890 /* 891 * Start MPI communication. 892 */ 893 if (pmcs_start_mpi(pwp)) { 894 if (pmcs_soft_reset(pwp, B_FALSE)) { 895 goto failure; 896 } 897 } 898 899 /* 900 * Do some initial acceptance tests. 901 * This tests interrupts and queues. 902 */ 903 if (pmcs_echo_test(pwp)) { 904 goto failure; 905 } 906 907 /* Read VPD - if it exists */ 908 if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) { 909 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 910 "%s: Unable to read VPD: " 911 "attempting to fabricate", __func__); 912 /* 913 * When we release, this must goto failure and the call 914 * to pmcs_fabricate_wwid is removed. 
915 */ 916 /* goto failure; */ 917 if (!pmcs_fabricate_wwid(pwp)) { 918 goto failure; 919 } 920 } 921 922 /* 923 * We're now officially running 924 */ 925 pwp->state = STATE_RUNNING; 926 927 /* 928 * Check firmware versions and load new firmware 929 * if needed and reset. 930 */ 931 if (pmcs_firmware_update(pwp)) { 932 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 933 "%s: Firmware update failed", __func__); 934 goto failure; 935 } 936 937 /* 938 * Create completion threads. 939 */ 940 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 941 pwp->cq_info.cq_thr_info[i].cq_pwp = pwp; 942 pwp->cq_info.cq_thr_info[i].cq_thread = 943 thread_create(NULL, 0, pmcs_scsa_cq_run, 944 &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri); 945 } 946 947 /* 948 * Create one thread to deal with the updating of the interrupt 949 * coalescing timer. 950 */ 951 pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal, 952 pwp, 0, &p0, TS_RUN, minclsyspri); 953 954 /* 955 * Kick off the watchdog 956 */ 957 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 958 drv_usectohz(PMCS_WATCH_INTERVAL)); 959 /* 960 * Do the SCSI attachment code (before starting phys) 961 */ 962 if (pmcs_scsa_init(pwp, &pmcs_dattr)) { 963 goto failure; 964 } 965 pwp->hba_attached = 1; 966 967 /* 968 * Initialize the rwlock for the iport elements. 
969 */ 970 rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL); 971 972 /* Check all acc & dma handles allocated in attach */ 973 if (pmcs_check_acc_dma_handle(pwp)) { 974 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 975 goto failure; 976 } 977 978 /* 979 * Create the phymap for this HBA instance 980 */ 981 if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL, 982 pwp, pmcs_phymap_activate, pmcs_phymap_deactivate, 983 &pwp->hss_phymap) != DDI_SUCCESS) { 984 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 985 "%s: pmcs%d phymap_create failed", __func__, inst); 986 goto failure; 987 } 988 ASSERT(pwp->hss_phymap); 989 990 /* 991 * Create the iportmap for this HBA instance 992 */ 993 if (scsi_hba_iportmap_create(dip, iportmap_usec, 994 &pwp->hss_iportmap) != DDI_SUCCESS) { 995 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 996 "%s: pmcs%d iportmap_create failed", __func__, inst); 997 goto failure; 998 } 999 ASSERT(pwp->hss_iportmap); 1000 1001 /* 1002 * Start the PHYs. 1003 */ 1004 if (pmcs_start_phys(pwp)) { 1005 goto failure; 1006 } 1007 1008 /* 1009 * From this point on, we can't fail. 
1010 */ 1011 ddi_report_dev(dip); 1012 1013 /* SM-HBA */ 1014 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED, 1015 &sm_hba); 1016 1017 /* SM-HBA */ 1018 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION, 1019 pmcs_driver_rev); 1020 1021 /* SM-HBA */ 1022 chiprev = 'A' + pwp->chiprev; 1023 (void) snprintf(hw_rev, 2, "%s", &chiprev); 1024 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION, 1025 hw_rev); 1026 1027 /* SM-HBA */ 1028 switch (PMCS_FW_TYPE(pwp)) { 1029 case PMCS_FW_TYPE_RELEASED: 1030 fwsupport = "Released"; 1031 break; 1032 case PMCS_FW_TYPE_DEVELOPMENT: 1033 fwsupport = "Development"; 1034 break; 1035 case PMCS_FW_TYPE_ALPHA: 1036 fwsupport = "Alpha"; 1037 break; 1038 case PMCS_FW_TYPE_BETA: 1039 fwsupport = "Beta"; 1040 break; 1041 default: 1042 fwsupport = "Special"; 1043 break; 1044 } 1045 (void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s", 1046 PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp), 1047 fwsupport); 1048 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION, 1049 fw_rev); 1050 1051 /* SM-HBA */ 1052 num_phys = pwp->nphy; 1053 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA, 1054 &num_phys); 1055 1056 /* SM-HBA */ 1057 protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT; 1058 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL, 1059 &protocol); 1060 1061 return (DDI_SUCCESS); 1062 1063 failure: 1064 if (pmcs_unattach(pwp)) { 1065 pwp->stuck = 1; 1066 } 1067 return (DDI_FAILURE); 1068 } 1069 1070 int 1071 pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1072 { 1073 int inst = ddi_get_instance(dip); 1074 pmcs_iport_t *iport = NULL; 1075 pmcs_hw_t *pwp = NULL; 1076 scsi_hba_tran_t *tran; 1077 1078 if (scsi_hba_iport_unit_address(dip) != NULL) { 1079 /* iport node */ 1080 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 1081 ASSERT(iport); 1082 if (iport == NULL) { 1083 return (DDI_FAILURE); 1084 } 1085 pwp = 
iport->pwp; 1086 } else { 1087 /* hba node */ 1088 pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst); 1089 ASSERT(pwp); 1090 if (pwp == NULL) { 1091 return (DDI_FAILURE); 1092 } 1093 } 1094 1095 switch (cmd) { 1096 case DDI_DETACH: 1097 if (iport) { 1098 /* iport detach */ 1099 if (pmcs_iport_unattach(iport)) { 1100 return (DDI_FAILURE); 1101 } 1102 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1103 "iport%d detached", inst); 1104 return (DDI_SUCCESS); 1105 } else { 1106 /* HBA detach */ 1107 if (pmcs_unattach(pwp)) { 1108 return (DDI_FAILURE); 1109 } 1110 return (DDI_SUCCESS); 1111 } 1112 1113 case DDI_SUSPEND: 1114 case DDI_PM_SUSPEND: 1115 /* No DDI_SUSPEND on iport nodes */ 1116 if (iport) { 1117 return (DDI_SUCCESS); 1118 } 1119 1120 if (pwp->stuck) { 1121 return (DDI_FAILURE); 1122 } 1123 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 1124 if (!tran) { 1125 return (DDI_FAILURE); 1126 } 1127 1128 pwp = TRAN2PMC(tran); 1129 if (pwp == NULL) { 1130 return (DDI_FAILURE); 1131 } 1132 mutex_enter(&pwp->lock); 1133 if (pwp->tq) { 1134 ddi_taskq_suspend(pwp->tq); 1135 } 1136 pwp->suspended = 1; 1137 mutex_exit(&pwp->lock); 1138 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending"); 1139 return (DDI_SUCCESS); 1140 1141 default: 1142 return (DDI_FAILURE); 1143 } 1144 } 1145 1146 static int 1147 pmcs_iport_unattach(pmcs_iport_t *iport) 1148 { 1149 pmcs_hw_t *pwp = iport->pwp; 1150 1151 /* 1152 * First, check if there are still any configured targets on this 1153 * iport. If so, we fail detach. 1154 */ 1155 if (pmcs_iport_has_targets(pwp, iport)) { 1156 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 1157 "iport%d detach failure: iport has targets (luns)", 1158 ddi_get_instance(iport->dip)); 1159 return (DDI_FAILURE); 1160 } 1161 1162 /* 1163 * Remove this iport from our list if it is inactive in the phymap. 
 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	mutex_enter(&iport->lock);

	/* Cannot tear down an iport whose unit address is still active */
	if (iport->ua_state == UA_ACTIVE) {
		mutex_exit(&iport->lock);
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: "
		    "iport unit address active in phymap",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/* If it's our only iport, clear iports_attached */
	ASSERT(pwp->num_iports >= 1);
	if (--pwp->num_iports == 0) {
		pwp->iports_attached = 0;
	}

	ASSERT(list_link_active(&iport->list_node));
	list_remove(&pwp->iports, iport);
	rw_exit(&pwp->iports_lock);

	/*
	 * We have removed the iport handle from the HBA's iports list,
	 * there will be no new references to it. Two things must be
	 * guarded against here. First, we could have PHY up events,
	 * adding themselves to the iport->phys list and grabbing ref's
	 * on our iport handle. Second, we could have existing references
	 * to this iport handle from a point in time prior to the list
	 * removal above.
	 *
	 * So first, destroy the phys list. Remove any phys that have snuck
	 * in after the phymap deactivate, dropping the refcnt accordingly.
	 * If these PHYs are still up if and when the phymap reactivates
	 * (i.e. when this iport reattaches), we'll populate the list with
	 * them and bump the refcnt back up.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	ASSERT(list_is_empty(&iport->phys));
	list_destroy(&iport->phys);
	mutex_exit(&iport->lock);

	/*
	 * Second, wait for any other references to this iport to be
	 * dropped, then continue teardown.
	 */
	mutex_enter(&iport->refcnt_lock);
	while (iport->refcnt != 0) {
		cv_wait(&iport->refcnt_cv, &iport->refcnt_lock);
	}
	mutex_exit(&iport->refcnt_lock);

	/* Delete kstats */
	pmcs_destroy_phy_stats(iport);

	/* Destroy the iport target map */
	if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) {
		return (DDI_FAILURE);
	}

	/* Free the tgt soft state */
	if (iport->tgt_sstate != NULL) {
		ddi_soft_state_bystr_fini(&iport->tgt_sstate);
	}

	/*
	 * Free our unit address string.
	 * NOTE(review): assumes iport->ua is always non-NULL here
	 * (strfree() does not accept NULL) -- confirm against iport attach.
	 */
	strfree(iport->ua);

	/* Finish teardown and free the softstate */
	mutex_destroy(&iport->refcnt_lock);
	mutex_destroy(&iport->smp_lock);
	ASSERT(iport->refcnt == 0);
	cv_destroy(&iport->refcnt_cv);
	cv_destroy(&iport->smp_cv);
	mutex_destroy(&iport->lock);
	ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip));

	return (DDI_SUCCESS);
}

/*
 * Tear down the entire HBA instance. Returns 0 on success, -1 if the
 * instance is stuck (partially torn down) and cannot be fully unloaded.
 */
static int
pmcs_unattach(pmcs_hw_t *pwp)
{
	int i;
	enum pwpstate curstate;
	pmcs_cq_thr_info_t *cqti;

	/*
	 * Tear down the interrupt infrastructure.
	 */
	if (pmcs_teardown_intr(pwp)) {
		pwp->stuck = 1;
	}
	pwp->intr_cnt = 0;

	/*
	 * Grab a lock, if initted, to set state.
	 */
	if (pwp->locks_initted) {
		mutex_enter(&pwp->lock);
		if (pwp->state != STATE_DEAD) {
			pwp->state = STATE_UNPROBING;
		}
		curstate = pwp->state;
		mutex_exit(&pwp->lock);

		/*
		 * Stop the I/O completion threads.
1274 */ 1275 mutex_enter(&pwp->cq_lock); 1276 pwp->cq_info.cq_stop = B_TRUE; 1277 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1278 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1279 cqti = &pwp->cq_info.cq_thr_info[i]; 1280 mutex_enter(&cqti->cq_thr_lock); 1281 cv_signal(&cqti->cq_cv); 1282 mutex_exit(&cqti->cq_thr_lock); 1283 mutex_exit(&pwp->cq_lock); 1284 thread_join(cqti->cq_thread->t_did); 1285 mutex_enter(&pwp->cq_lock); 1286 } 1287 } 1288 mutex_exit(&pwp->cq_lock); 1289 1290 /* 1291 * Stop the interrupt coalescing timer thread 1292 */ 1293 if (pwp->ict_thread) { 1294 mutex_enter(&pwp->ict_lock); 1295 pwp->io_intr_coal.stop_thread = B_TRUE; 1296 cv_signal(&pwp->ict_cv); 1297 mutex_exit(&pwp->ict_lock); 1298 thread_join(pwp->ict_thread->t_did); 1299 } 1300 } else { 1301 if (pwp->state != STATE_DEAD) { 1302 pwp->state = STATE_UNPROBING; 1303 } 1304 curstate = pwp->state; 1305 } 1306 1307 if (&pwp->iports != NULL) { 1308 /* Destroy the iports lock */ 1309 rw_destroy(&pwp->iports_lock); 1310 /* Destroy the iports list */ 1311 ASSERT(list_is_empty(&pwp->iports)); 1312 list_destroy(&pwp->iports); 1313 } 1314 1315 if (pwp->hss_iportmap != NULL) { 1316 /* Destroy the iportmap */ 1317 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1318 } 1319 1320 if (pwp->hss_phymap != NULL) { 1321 /* Destroy the phymap */ 1322 sas_phymap_destroy(pwp->hss_phymap); 1323 } 1324 1325 /* 1326 * Make sure that any pending watchdog won't 1327 * be called from this point on out. 1328 */ 1329 (void) untimeout(pwp->wdhandle); 1330 /* 1331 * After the above action, the watchdog 1332 * timer that starts up the worker task 1333 * may trigger but will exit immediately 1334 * on triggering. 1335 * 1336 * Now that this is done, we can destroy 1337 * the task queue, which will wait if we're 1338 * running something on it. 
1339 */ 1340 if (pwp->tq) { 1341 ddi_taskq_destroy(pwp->tq); 1342 pwp->tq = NULL; 1343 } 1344 1345 pmcs_fm_fini(pwp); 1346 1347 if (pwp->hba_attached) { 1348 (void) scsi_hba_detach(pwp->dip); 1349 pwp->hba_attached = 0; 1350 } 1351 1352 /* 1353 * If the chip hasn't been marked dead, shut it down now 1354 * to bring it back to a known state without attempting 1355 * a soft reset. 1356 */ 1357 if (curstate != STATE_DEAD && pwp->locks_initted) { 1358 /* 1359 * De-register all registered devices 1360 */ 1361 pmcs_deregister_devices(pwp, pwp->root_phys); 1362 1363 /* 1364 * Stop all the phys. 1365 */ 1366 pmcs_stop_phys(pwp); 1367 1368 /* 1369 * Shut Down Message Passing 1370 */ 1371 (void) pmcs_stop_mpi(pwp); 1372 1373 /* 1374 * Reset chip 1375 */ 1376 (void) pmcs_soft_reset(pwp, B_FALSE); 1377 } 1378 1379 /* 1380 * Turn off interrupts on the chip 1381 */ 1382 if (pwp->mpi_acc_handle) { 1383 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1384 } 1385 1386 /* Destroy pwp's lock */ 1387 if (pwp->locks_initted) { 1388 mutex_destroy(&pwp->lock); 1389 mutex_destroy(&pwp->dma_lock); 1390 mutex_destroy(&pwp->axil_lock); 1391 mutex_destroy(&pwp->cq_lock); 1392 mutex_destroy(&pwp->config_lock); 1393 mutex_destroy(&pwp->ict_lock); 1394 mutex_destroy(&pwp->wfree_lock); 1395 mutex_destroy(&pwp->pfree_lock); 1396 mutex_destroy(&pwp->dead_phylist_lock); 1397 #ifdef DEBUG 1398 mutex_destroy(&pwp->dbglock); 1399 #endif 1400 cv_destroy(&pwp->ict_cv); 1401 cv_destroy(&pwp->drain_cv); 1402 pwp->locks_initted = 0; 1403 } 1404 1405 /* 1406 * Free DMA handles and associated consistent memory 1407 */ 1408 if (pwp->regdump_hndl) { 1409 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1410 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1411 "Condition check failed " 1412 "at %s():%d", __func__, __LINE__); 1413 } 1414 ddi_dma_free_handle(&pwp->regdump_hndl); 1415 ddi_dma_mem_free(&pwp->regdump_acchdl); 1416 pwp->regdump_hndl = 0; 1417 } 1418 if (pwp->fwlog_hndl) { 1419 if 
(ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1420 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1421 "Condition check failed " 1422 "at %s():%d", __func__, __LINE__); 1423 } 1424 ddi_dma_free_handle(&pwp->fwlog_hndl); 1425 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1426 pwp->fwlog_hndl = 0; 1427 } 1428 if (pwp->cip_handles) { 1429 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1430 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1431 "Condition check failed " 1432 "at %s():%d", __func__, __LINE__); 1433 } 1434 ddi_dma_free_handle(&pwp->cip_handles); 1435 ddi_dma_mem_free(&pwp->cip_acchdls); 1436 pwp->cip_handles = 0; 1437 } 1438 for (i = 0; i < PMCS_NOQ; i++) { 1439 if (pwp->oqp_handles[i]) { 1440 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1441 DDI_SUCCESS) { 1442 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1443 "Condition check failed at %s():%d", 1444 __func__, __LINE__); 1445 } 1446 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1447 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1448 pwp->oqp_handles[i] = 0; 1449 } 1450 } 1451 for (i = 0; i < PMCS_NIQ; i++) { 1452 if (pwp->iqp_handles[i]) { 1453 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1454 DDI_SUCCESS) { 1455 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1456 "Condition check failed at %s():%d", 1457 __func__, __LINE__); 1458 } 1459 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1460 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1461 pwp->iqp_handles[i] = 0; 1462 } 1463 } 1464 1465 pmcs_free_dma_chunklist(pwp); 1466 1467 /* 1468 * Unmap registers and destroy access handles 1469 */ 1470 if (pwp->mpi_acc_handle) { 1471 ddi_regs_map_free(&pwp->mpi_acc_handle); 1472 pwp->mpi_acc_handle = 0; 1473 } 1474 if (pwp->top_acc_handle) { 1475 ddi_regs_map_free(&pwp->top_acc_handle); 1476 pwp->top_acc_handle = 0; 1477 } 1478 if (pwp->gsm_acc_handle) { 1479 ddi_regs_map_free(&pwp->gsm_acc_handle); 1480 pwp->gsm_acc_handle = 0; 1481 } 1482 if (pwp->msg_acc_handle) { 1483 ddi_regs_map_free(&pwp->msg_acc_handle); 1484 
pwp->msg_acc_handle = 0; 1485 } 1486 if (pwp->pci_acc_handle) { 1487 pci_config_teardown(&pwp->pci_acc_handle); 1488 pwp->pci_acc_handle = 0; 1489 } 1490 1491 /* 1492 * Do memory allocation cleanup. 1493 */ 1494 while (pwp->dma_freelist) { 1495 pmcs_dmachunk_t *this = pwp->dma_freelist; 1496 pwp->dma_freelist = this->nxt; 1497 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1498 } 1499 1500 /* 1501 * Free pools 1502 */ 1503 if (pwp->iocomp_cb_cache) { 1504 kmem_cache_destroy(pwp->iocomp_cb_cache); 1505 } 1506 1507 /* 1508 * Free all PHYs (at level > 0), then free the cache 1509 */ 1510 pmcs_free_all_phys(pwp, pwp->root_phys); 1511 if (pwp->phy_cache) { 1512 kmem_cache_destroy(pwp->phy_cache); 1513 } 1514 1515 /* 1516 * Free root PHYs 1517 */ 1518 if (pwp->root_phys) { 1519 pmcs_phy_t *phyp = pwp->root_phys; 1520 for (i = 0; i < pwp->nphy; i++) { 1521 mutex_destroy(&phyp->phy_lock); 1522 phyp = phyp->sibling; 1523 } 1524 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1525 pwp->root_phys = NULL; 1526 pwp->nphy = 0; 1527 } 1528 1529 /* Free the targets list */ 1530 if (pwp->targets) { 1531 kmem_free(pwp->targets, 1532 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1533 } 1534 1535 /* 1536 * Free work structures 1537 */ 1538 1539 if (pwp->work && pwp->max_cmd) { 1540 for (i = 0; i < pwp->max_cmd - 1; i++) { 1541 pmcwork_t *pwrk = &pwp->work[i]; 1542 mutex_destroy(&pwrk->lock); 1543 cv_destroy(&pwrk->sleep_cv); 1544 } 1545 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1546 pwp->work = NULL; 1547 pwp->max_cmd = 0; 1548 } 1549 1550 /* 1551 * Do last property and SCSA cleanup 1552 */ 1553 if (pwp->tran) { 1554 scsi_hba_tran_free(pwp->tran); 1555 pwp->tran = NULL; 1556 } 1557 if (pwp->reset_notify_listf) { 1558 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1559 pwp->reset_notify_listf = NULL; 1560 } 1561 ddi_prop_remove_all(pwp->dip); 1562 if (pwp->stuck) { 1563 return (-1); 1564 } 1565 1566 /* Free register dump area if allocated */ 1567 if 
(pwp->regdumpp) {
		kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE);
		pwp->regdumpp = NULL;
	}
	if (pwp->iqpt && pwp->iqpt->head) {
		kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE);
		pwp->iqpt->head = pwp->iqpt->curpos = NULL;
	}
	if (pwp->iqpt) {
		kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t));
		pwp->iqpt = NULL;
	}

	ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip));
	return (0);
}

/*
 * quiesce (9E) entry point
 *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled. Therefore, the function must not block/wait/sleep.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
pmcs_quiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;
	scsi_hba_tran_t *tran;

	/* If the transport was never set up there is nothing to quiesce */
	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_SUCCESS);

	/* No quiesce necessary on a per-iport basis */
	if (scsi_hba_iport_unit_address(dip) != NULL) {
		return (DDI_SUCCESS);
	}

	if ((pwp = TRAN2PMC(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Stop MPI & Reset chip (no need to re-initialize) */
	(void) pmcs_stop_mpi(pwp);
	(void) pmcs_soft_reset(pwp, B_TRUE);

	return (DDI_SUCCESS);
}

/*
 * Called with xp->statlock and PHY lock and scratch acquired.
 */
static int
pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	ata_identify_t *ati;
	int result, i;
	pmcs_phy_t *pptr;
	uint16_t *a;
	union {
		uint8_t nsa[8];
		uint16_t nsb[4];
	} u;

	/*
	 * Safe defaults - use only if this target is brand new (i.e. doesn't
	 * already have these settings configured)
	 */
	if (xp->capacity == 0) {
		xp->capacity = (uint64_t)-1;
		xp->ca = 1;
		xp->qdepth = 1;
		xp->pio = 1;
	}

	pptr = xp->phy;

	/*
	 * We only try and issue an IDENTIFY for first level
	 * (direct attached) devices. We don't try and
	 * set other quirks here (this will happen later,
	 * if the device is fully configured)
	 */
	if (pptr->level) {
		return (0);
	}

	/* statlock is dropped across the IDENTIFY and reacquired after */
	mutex_exit(&xp->statlock);
	result = pmcs_sata_identify(pwp, pptr);
	mutex_enter(&xp->statlock);

	if (result) {
		return (result);
	}
	/* Byte-swap words 108-111 of the IDENTIFY data into the union */
	ati = pwp->scratch;
	a = &ati->word108;
	for (i = 0; i < 4; i++) {
		u.nsb[i] = ddi_swap16(*a++);
	}

	/*
	 * Check the returned data for being a valid (NAA=5) WWN.
	 * If so, use that and override the SAS address we were
	 * given at Link Up time.
	 */
	if ((u.nsa[0] >> 4) == 5) {
		(void) memcpy(pptr->sas_address, u.nsa, 8);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
	    "%s: %s has SAS ADDRESS " SAS_ADDR_FMT,
	    __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address));
	return (0);
}

/*
 * Called with PHY lock and target statlock held and scratch acquired
 */
static boolean_t
pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target)
{
	ASSERT(target != NULL);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p",
	    __func__, (void *) target);

	switch (target->phy->dtype) {
	case SATA:
		if (pmcs_add_sata_device(pwp, target) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy,
			    target, "%s: add_sata_device failed for tgt 0x%p",
			    __func__, (void *) target);
			return (B_FALSE);
		}
		break;
	case SAS:
		target->qdepth = maxqdepth;
		break;
	case EXPANDER:
		target->qdepth = 1;
		break;
	}

	target->new = 0;
	target->assigned = 1;
	target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
	target->dtype = target->phy->dtype;

	/*
	 * Set the PHY's config stop time to 0. This is one of the final
	 * stops along the config path, so we're indicating that we
	 * successfully configured the PHY.
	 */
	target->phy->config_stop = 0;

	return (B_TRUE);
}

/*
 * Taskq worker: snapshot-and-clear the pending work flags, then run each
 * requested piece of deferred work exactly once. Work posted after the
 * atomic swap is picked up on the next watchdog dispatch.
 */
void
pmcs_worker(void *arg)
{
	pmcs_hw_t *pwp = arg;
	ulong_t work_flags;

	DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	if (pwp->state != STATE_RUNNING) {
		return;
	}

	work_flags = atomic_swap_ulong(&pwp->work_flags, 0);

	if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) {
		pmcs_ack_events(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) {
		mutex_enter(&pwp->lock);
		pmcs_spinup_release(pwp, NULL);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) {
		pmcs_ssp_event_recovery(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) {
		pmcs_dev_state_recovery(pwp, NULL);
	}

	if (work_flags & PMCS_WORK_FLAG_DISCOVER) {
		pmcs_discover(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) {
		if (pmcs_abort_handler(pwp)) {
			/* Not done yet: reschedule ourselves */
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
	}

	if (work_flags & PMCS_WORK_FLAG_SATA_RUN) {
		pmcs_sata_work(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) {
		pmcs_scsa_wq_run(pwp);
		mutex_enter(&pwp->lock);
		PMCS_CQ_RUN(pwp);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) {
		if (pmcs_add_more_chunks(pwp,
		    ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
		} else {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
	}
}

static int
pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize) 1790 { 1791 pmcs_dmachunk_t *dc; 1792 unsigned long dl; 1793 pmcs_chunk_t *pchunk = NULL; 1794 1795 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 1796 1797 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP); 1798 if (pchunk == NULL) { 1799 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1800 "Not enough memory for DMA chunks"); 1801 return (-1); 1802 } 1803 1804 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle, 1805 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp, 1806 &pchunk->dma_addr) == B_FALSE) { 1807 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1808 "Failed to setup DMA for chunks"); 1809 kmem_free(pchunk, sizeof (pmcs_chunk_t)); 1810 return (-1); 1811 } 1812 1813 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) || 1814 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) { 1815 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED); 1816 return (-1); 1817 } 1818 1819 bzero(pchunk->addrp, nsize); 1820 dc = NULL; 1821 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) { 1822 pmcs_dmachunk_t *tmp; 1823 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP); 1824 tmp->nxt = dc; 1825 dc = tmp; 1826 } 1827 mutex_enter(&pwp->dma_lock); 1828 pmcs_idma_chunks(pwp, dc, pchunk, nsize); 1829 pwp->nchunks++; 1830 mutex_exit(&pwp->dma_lock); 1831 return (0); 1832 } 1833 1834 1835 static void 1836 pmcs_check_commands(pmcs_hw_t *pwp) 1837 { 1838 pmcs_cmd_t *sp; 1839 size_t amt; 1840 char path[32]; 1841 pmcwork_t *pwrk; 1842 pmcs_xscsi_t *target; 1843 pmcs_phy_t *phyp; 1844 1845 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) { 1846 mutex_enter(&pwrk->lock); 1847 1848 /* 1849 * If the command isn't active, we can't be timing it still. 1850 * Active means the tag is not free and the state is "on chip". 1851 */ 1852 if (!PMCS_COMMAND_ACTIVE(pwrk)) { 1853 mutex_exit(&pwrk->lock); 1854 continue; 1855 } 1856 1857 /* 1858 * No timer active for this command. 
1859 */ 1860 if (pwrk->timer == 0) { 1861 mutex_exit(&pwrk->lock); 1862 continue; 1863 } 1864 1865 /* 1866 * Knock off bits for the time interval. 1867 */ 1868 if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) { 1869 pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL); 1870 } else { 1871 pwrk->timer = 0; 1872 } 1873 if (pwrk->timer > 0) { 1874 mutex_exit(&pwrk->lock); 1875 continue; 1876 } 1877 1878 /* 1879 * The command has now officially timed out. 1880 * Get the path for it. If it doesn't have 1881 * a phy pointer any more, it's really dead 1882 * and can just be put back on the free list. 1883 * There should *not* be any commands associated 1884 * with it any more. 1885 */ 1886 if (pwrk->phy == NULL) { 1887 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1888 "dead command with gone phy being recycled"); 1889 ASSERT(pwrk->xp == NULL); 1890 pmcs_pwork(pwp, pwrk); 1891 continue; 1892 } 1893 amt = sizeof (path); 1894 amt = min(sizeof (pwrk->phy->path), amt); 1895 (void) memcpy(path, pwrk->phy->path, amt); 1896 1897 /* 1898 * If this is a non-SCSA command, stop here. Eventually 1899 * we might do something with non-SCSA commands here- 1900 * but so far their timeout mechanisms are handled in 1901 * the WAIT_FOR macro. 1902 */ 1903 if (pwrk->xp == NULL) { 1904 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1905 "%s: non-SCSA cmd tag 0x%x timed out", 1906 path, pwrk->htag); 1907 mutex_exit(&pwrk->lock); 1908 continue; 1909 } 1910 1911 sp = pwrk->arg; 1912 ASSERT(sp != NULL); 1913 1914 /* 1915 * Mark it as timed out. 
1916 */ 1917 CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT; 1918 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 1919 #ifdef DEBUG 1920 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 1921 "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d", 1922 path, pwrk->htag, pwrk->state, pwrk->onwire); 1923 #else 1924 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 1925 "%s: SCSA cmd tag 0x%x timed out (state %x)", 1926 path, pwrk->htag, pwrk->state); 1927 #endif 1928 /* 1929 * Mark the work structure as timed out. 1930 */ 1931 pwrk->state = PMCS_WORK_STATE_TIMED_OUT; 1932 phyp = pwrk->phy; 1933 target = pwrk->xp; 1934 mutex_exit(&pwrk->lock); 1935 1936 pmcs_lock_phy(phyp); 1937 mutex_enter(&target->statlock); 1938 1939 /* 1940 * No point attempting recovery if the device is gone 1941 */ 1942 if (target->dev_gone) { 1943 mutex_exit(&target->statlock); 1944 pmcs_unlock_phy(phyp); 1945 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 1946 "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE " 1947 "for htag 0x%08x", __func__, 1948 (void *)target, pwrk->htag); 1949 mutex_enter(&pwrk->lock); 1950 if (!PMCS_COMMAND_DONE(pwrk)) { 1951 /* Complete this command here */ 1952 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 1953 "%s: Completing cmd (htag 0x%08x) " 1954 "anyway", __func__, pwrk->htag); 1955 pwrk->dead = 1; 1956 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 1957 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 1958 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 1959 } else { 1960 mutex_exit(&pwrk->lock); 1961 } 1962 continue; 1963 } 1964 1965 /* 1966 * See if we're already waiting for device state recovery 1967 */ 1968 if (target->recover_wait) { 1969 pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, phyp, target, 1970 "%s: Target %p already in recovery", __func__, 1971 (void *)target); 1972 mutex_exit(&target->statlock); 1973 pmcs_unlock_phy(phyp); 1974 continue; 1975 } 1976 1977 pmcs_start_dev_state_recovery(target, phyp); 1978 mutex_exit(&target->statlock); 1979 pmcs_unlock_phy(phyp); 1980 } 1981 /* 1982 * Run 
any completions that may have been queued up. 1983 */ 1984 PMCS_CQ_RUN(pwp); 1985 } 1986 1987 static void 1988 pmcs_watchdog(void *arg) 1989 { 1990 pmcs_hw_t *pwp = arg; 1991 1992 DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t, 1993 pwp->config_changed); 1994 1995 mutex_enter(&pwp->lock); 1996 1997 if (pwp->state != STATE_RUNNING) { 1998 mutex_exit(&pwp->lock); 1999 return; 2000 } 2001 2002 if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) { 2003 if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp, 2004 DDI_NOSLEEP) != DDI_SUCCESS) { 2005 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2006 "Could not dispatch to worker thread"); 2007 } 2008 } 2009 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 2010 drv_usectohz(PMCS_WATCH_INTERVAL)); 2011 mutex_exit(&pwp->lock); 2012 pmcs_check_commands(pwp); 2013 pmcs_handle_dead_phys(pwp); 2014 } 2015 2016 static int 2017 pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt) 2018 { 2019 int i, r, rslt = 0; 2020 for (i = 0; i < icnt; i++) { 2021 r = ddi_intr_remove_handler(pwp->ih_table[i]); 2022 if (r == DDI_SUCCESS) { 2023 continue; 2024 } 2025 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2026 "%s: unable to remove interrupt handler %d", __func__, i); 2027 rslt = -1; 2028 break; 2029 } 2030 return (rslt); 2031 } 2032 2033 static int 2034 pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt) 2035 { 2036 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) { 2037 int r = ddi_intr_block_disable(&pwp->ih_table[0], 2038 pwp->intr_cnt); 2039 if (r != DDI_SUCCESS) { 2040 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2041 "unable to disable interrupt block"); 2042 return (-1); 2043 } 2044 } else { 2045 int i; 2046 for (i = 0; i < icnt; i++) { 2047 if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) { 2048 continue; 2049 } 2050 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2051 "unable to disable interrupt %d", i); 2052 return (-1); 2053 } 2054 } 2055 return (0); 2056 } 2057 2058 static int 2059 pmcs_free_intrs(pmcs_hw_t *pwp, int icnt) 2060 { 2061 int i; 2062 
for (i = 0; i < icnt; i++) { 2063 if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) { 2064 continue; 2065 } 2066 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2067 "unable to free interrupt %d", i); 2068 return (-1); 2069 } 2070 kmem_free(pwp->ih_table, pwp->ih_table_size); 2071 pwp->ih_table_size = 0; 2072 return (0); 2073 } 2074 2075 /* 2076 * Try to set up interrupts of type "type" with a minimum number of interrupts 2077 * of "min". 2078 */ 2079 static void 2080 pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min) 2081 { 2082 int rval, avail, count, actual, max; 2083 2084 rval = ddi_intr_get_nintrs(pwp->dip, type, &count); 2085 if ((rval != DDI_SUCCESS) || (count < min)) { 2086 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2087 "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d", 2088 __func__, type, rval, count, min); 2089 return; 2090 } 2091 2092 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2093 "%s: nintrs = %d for type: %d", __func__, count, type); 2094 2095 rval = ddi_intr_get_navail(pwp->dip, type, &avail); 2096 if ((rval != DDI_SUCCESS) || (avail < min)) { 2097 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2098 "%s: get_navail failed; type: %d rc: %d avail: %d min: %d", 2099 __func__, type, rval, avail, min); 2100 return; 2101 } 2102 2103 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2104 "%s: navail = %d for type: %d", __func__, avail, type); 2105 2106 pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t); 2107 pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP); 2108 2109 switch (type) { 2110 case DDI_INTR_TYPE_MSIX: 2111 pwp->int_type = PMCS_INT_MSIX; 2112 max = PMCS_MAX_MSIX; 2113 break; 2114 case DDI_INTR_TYPE_MSI: 2115 pwp->int_type = PMCS_INT_MSI; 2116 max = PMCS_MAX_MSI; 2117 break; 2118 case DDI_INTR_TYPE_FIXED: 2119 default: 2120 pwp->int_type = PMCS_INT_FIXED; 2121 max = PMCS_MAX_FIXED; 2122 break; 2123 } 2124 2125 rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual, 2126 DDI_INTR_ALLOC_NORMAL); 
2127 if (rval != DDI_SUCCESS) { 2128 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2129 "%s: ddi_intr_alloc failed; type: %d rc: %d", 2130 __func__, type, rval); 2131 kmem_free(pwp->ih_table, pwp->ih_table_size); 2132 pwp->ih_table = NULL; 2133 pwp->ih_table_size = 0; 2134 pwp->intr_cnt = 0; 2135 pwp->int_type = PMCS_INT_NONE; 2136 return; 2137 } 2138 2139 pwp->intr_cnt = actual; 2140 } 2141 2142 /* 2143 * Set up interrupts. 2144 * We return one of three values: 2145 * 2146 * 0 - success 2147 * EAGAIN - failure to set up interrupts 2148 * EIO - "" + we're now stuck partly enabled 2149 * 2150 * If EIO is returned, we can't unload the driver. 2151 */ 2152 static int 2153 pmcs_setup_intr(pmcs_hw_t *pwp) 2154 { 2155 int i, r, itypes, oqv_count; 2156 ddi_intr_handler_t **iv_table; 2157 size_t iv_table_size; 2158 uint_t pri; 2159 2160 if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) { 2161 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2162 "cannot get interrupt types"); 2163 return (EAGAIN); 2164 } 2165 2166 if (disable_msix) { 2167 itypes &= ~DDI_INTR_TYPE_MSIX; 2168 } 2169 if (disable_msi) { 2170 itypes &= ~DDI_INTR_TYPE_MSI; 2171 } 2172 2173 /* 2174 * We won't know what firmware we're running until we call pmcs_setup, 2175 * and we can't call pmcs_setup until we establish interrupts. 2176 */ 2177 2178 pwp->int_type = PMCS_INT_NONE; 2179 2180 /* 2181 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be 2182 * uncivilized. 
2183 */ 2184 if (itypes & DDI_INTR_TYPE_MSIX) { 2185 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX); 2186 if (pwp->int_type == PMCS_INT_MSIX) { 2187 itypes = 0; 2188 } 2189 } 2190 2191 if (itypes & DDI_INTR_TYPE_MSI) { 2192 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1); 2193 if (pwp->int_type == PMCS_INT_MSI) { 2194 itypes = 0; 2195 } 2196 } 2197 2198 if (itypes & DDI_INTR_TYPE_FIXED) { 2199 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1); 2200 if (pwp->int_type == PMCS_INT_FIXED) { 2201 itypes = 0; 2202 } 2203 } 2204 2205 if (pwp->intr_cnt == 0) { 2206 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 2207 "No interrupts available"); 2208 return (EAGAIN); 2209 } 2210 2211 iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt; 2212 iv_table = kmem_alloc(iv_table_size, KM_SLEEP); 2213 2214 /* 2215 * Get iblock cookie and add handlers. 2216 */ 2217 switch (pwp->intr_cnt) { 2218 case 1: 2219 iv_table[0] = pmcs_all_intr; 2220 break; 2221 case 2: 2222 iv_table[0] = pmcs_iodone_ix; 2223 iv_table[1] = pmcs_nonio_ix; 2224 break; 2225 case 4: 2226 iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix; 2227 iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix; 2228 iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix; 2229 iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix; 2230 break; 2231 default: 2232 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2233 "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt); 2234 kmem_free(iv_table, iv_table_size); 2235 return (EAGAIN); 2236 } 2237 2238 for (i = 0; i < pwp->intr_cnt; i++) { 2239 r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i], 2240 (caddr_t)pwp, NULL); 2241 if (r != DDI_SUCCESS) { 2242 kmem_free(iv_table, iv_table_size); 2243 if (pmcs_remove_ihandlers(pwp, i)) { 2244 return (EIO); 2245 } 2246 if (pmcs_free_intrs(pwp, i)) { 2247 return (EIO); 2248 } 2249 pwp->intr_cnt = 0; 2250 return (EAGAIN); 2251 } 2252 } 2253 2254 kmem_free(iv_table, iv_table_size); 2255 2256 if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) { 
2257 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2258 "unable to get int capabilities"); 2259 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) { 2260 return (EIO); 2261 } 2262 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) { 2263 return (EIO); 2264 } 2265 pwp->intr_cnt = 0; 2266 return (EAGAIN); 2267 } 2268 2269 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) { 2270 r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt); 2271 if (r != DDI_SUCCESS) { 2272 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2273 "intr blk enable failed"); 2274 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) { 2275 return (EIO); 2276 } 2277 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) { 2278 return (EIO); 2279 } 2280 pwp->intr_cnt = 0; 2281 return (EFAULT); 2282 } 2283 } else { 2284 for (i = 0; i < pwp->intr_cnt; i++) { 2285 r = ddi_intr_enable(pwp->ih_table[i]); 2286 if (r == DDI_SUCCESS) { 2287 continue; 2288 } 2289 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2290 "unable to enable interrupt %d", i); 2291 if (pmcs_disable_intrs(pwp, i)) { 2292 return (EIO); 2293 } 2294 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) { 2295 return (EIO); 2296 } 2297 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) { 2298 return (EIO); 2299 } 2300 pwp->intr_cnt = 0; 2301 return (EAGAIN); 2302 } 2303 } 2304 2305 /* 2306 * Set up locks. 
2307 */ 2308 if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) { 2309 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2310 "unable to get interrupt priority"); 2311 if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) { 2312 return (EIO); 2313 } 2314 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) { 2315 return (EIO); 2316 } 2317 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) { 2318 return (EIO); 2319 } 2320 pwp->intr_cnt = 0; 2321 return (EAGAIN); 2322 } 2323 2324 pwp->locks_initted = 1; 2325 pwp->intr_pri = pri; 2326 mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2327 mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2328 mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2329 mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2330 mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2331 mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2332 mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2333 mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2334 mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER, 2335 DDI_INTR_PRI(pri)); 2336 #ifdef DEBUG 2337 mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri)); 2338 #endif 2339 cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL); 2340 cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL); 2341 for (i = 0; i < PMCS_NIQ; i++) { 2342 mutex_init(&pwp->iqp_lock[i], NULL, 2343 MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri)); 2344 } 2345 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 2346 mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL, 2347 MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri)); 2348 cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL, 2349 CV_DRIVER, NULL); 2350 } 2351 2352 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured", 2353 pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" : 2354 ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"), 2355 pwp->intr_cnt == 1? 
	    "t" : "ts");


	/*
	 * Enable Interrupts
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	/*
	 * Build the outbound doorbell mask: start from all-ones and clear
	 * one bit per vector/queue that will be serviced.
	 */
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}

/*
 * Disable, remove and free all interrupts previously set up by
 * pmcs_setup_intr().  Returns 0 on success, EIO if any of the DDI
 * teardown steps fails (in which case intr_cnt is left non-zero).
 */
static int
pmcs_teardown_intr(pmcs_hw_t *pwp)
{
	if (pwp->intr_cnt) {
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
	}
	return (0);
}

/*
 * Interrupt handler for the "general" outbound queue vector
 * (4-vector MSI-X configuration; see iv_table setup in pmcs_setup_intr()).
 */
static uint_t
pmcs_general_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_general_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt handler for the events outbound queue vector
 * (4-vector MSI-X configuration).
 */
static uint_t
pmcs_event_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_event_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt handler for the I/O completion outbound queue vector
 * (used in both the 2-vector and 4-vector configurations).
 */
static uint_t
pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);

	/*
	 * It's possible that if we just turned interrupt coalescing off
	 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
	 * queue) that there was an interrupt already pending. We use
	 * io_intr_coal.int_cleared to ensure that we still drop in here and
	 * clear the appropriate interrupt bit one last time.
	 */
	mutex_enter(&pwp->ict_lock);
	if (pwp->io_intr_coal.timer_on ||
	    (pwp->io_intr_coal.int_cleared == B_FALSE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		pwp->io_intr_coal.int_cleared = B_TRUE;
	}
	mutex_exit(&pwp->ict_lock);

	pmcs_iodone_intr(pwp);

	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt handler for the fatal-interrupt vector
 * (4-vector MSI-X configuration).
 */
static uint_t
pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_fatal_handler(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * 2-vector configuration: everything other than I/O completions
 * (fatal, general and event interrupts) comes through this handler.
 */
static uint_t
pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *)arg1;
	uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		/*
		 * NOTE(review): both the general and event services run off
		 * the PMCS_OQ_GENERAL doorbell bit here, and PMCS_OQ_EVENTS
		 * is never tested or cleared -- presumably the two queues
		 * share this vector in 2-vector mode; confirm against the
		 * outbound queue configuration.
		 */
		pmcs_general_intr(pwp);
		pmcs_event_intr(pwp);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Single-vector (MSI/INT-X) handler: service every asserted outbound
 * doorbell bit in turn.  Claims the interrupt only if something was
 * actually serviced, or if the interrupt type is MSI (which is not
 * shared, so it is always claimed).
 */
static uint_t
pmcs_all_intr(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *) arg1;
	uint32_t obdb;
	int handled = 0;

	obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Check for Outbound Queue service needed
	 */
	if (obdb & (1 << PMCS_OQ_IODONE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		obdb ^= (1 << PMCS_OQ_IODONE);
		handled++;
		pmcs_iodone_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		obdb ^= (1 << PMCS_OQ_GENERAL);
		handled++;
		pmcs_general_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_EVENTS)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_EVENTS));
		obdb ^= (1 << PMCS_OQ_EVENTS);
		handled++;
		pmcs_event_intr(pwp);
	}
	if (obdb) {
		/* Unexpected doorbell bits: log them, but clear them too */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "interrupt bits not handled (0x%x)", obdb);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
		handled++;
	}
	if (pwp->int_type == PMCS_INT_MSI) {
		handled++;
	}
	return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

/*
 * Fatal chip interrupt: record the chip register state, mark the
 * softstate dead, mask and clear all outbound doorbells, then post an
 * FMA ereport and declare the service lost.  DEBUG builds panic so
 * the firmware failure state is captured in a dump.
 */
void
pmcs_fatal_handler(pmcs_hw_t *pwp)
{
	pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught");
	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;
	pmcs_register_dump_int(pwp);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);
	pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);

#ifdef DEBUG
	cmn_err(CE_PANIC, "PMCS Fatal Firmware Error");
#endif
}

/*
 * Called with PHY lock and target statlock held and scratch acquired.
2550 */ 2551 boolean_t 2552 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 2553 { 2554 pmcs_phy_t *pptr = tgt->phy; 2555 2556 switch (pptr->dtype) { 2557 case SAS: 2558 case EXPANDER: 2559 break; 2560 case SATA: 2561 tgt->ca = 1; 2562 break; 2563 default: 2564 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2565 "%s: Target %p has PHY %p with invalid dtype", 2566 __func__, (void *)tgt, (void *)pptr); 2567 return (B_FALSE); 2568 } 2569 2570 tgt->new = 1; 2571 tgt->dev_gone = 0; 2572 tgt->recover_wait = 0; 2573 2574 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2575 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__, 2576 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2577 2578 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) { 2579 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2580 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__, 2581 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2582 mutex_destroy(&tgt->statlock); 2583 mutex_destroy(&tgt->wqlock); 2584 mutex_destroy(&tgt->aqlock); 2585 return (B_FALSE); 2586 } 2587 2588 return (B_TRUE); 2589 } 2590 2591 /* 2592 * Called with softstate lock held 2593 */ 2594 void 2595 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2596 { 2597 pmcs_xscsi_t *xp; 2598 unsigned int vtgt; 2599 2600 ASSERT(mutex_owned(&pwp->lock)); 2601 2602 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) { 2603 xp = pwp->targets[vtgt]; 2604 if (xp == NULL) { 2605 continue; 2606 } 2607 2608 mutex_enter(&xp->statlock); 2609 if (xp->phy == pptr) { 2610 if (xp->new) { 2611 xp->new = 0; 2612 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2613 "cancel config of vtgt %u", vtgt); 2614 } else { 2615 pmcs_clear_xp(pwp, xp); 2616 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2617 "Removed tgt 0x%p vtgt %u", 2618 (void *)xp, vtgt); 2619 } 2620 mutex_exit(&xp->statlock); 2621 break; 2622 } 2623 mutex_exit(&xp->statlock); 2624 } 2625 } 2626 2627 void 2628 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, 2629 pmcs_phy_t 
*phyp, pmcs_xscsi_t *target, const char *fmt, ...) 2630 { 2631 va_list ap; 2632 int written = 0; 2633 char *ptr; 2634 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2635 boolean_t system_log; 2636 int system_log_level; 2637 2638 switch (level) { 2639 case PMCS_PRT_DEBUG_DEVEL: 2640 case PMCS_PRT_DEBUG_DEV_STATE: 2641 case PMCS_PRT_DEBUG_PHY_LOCKING: 2642 case PMCS_PRT_DEBUG_SCSI_STATUS: 2643 case PMCS_PRT_DEBUG_UNDERFLOW: 2644 case PMCS_PRT_DEBUG_CONFIG: 2645 case PMCS_PRT_DEBUG_IPORT: 2646 case PMCS_PRT_DEBUG_MAP: 2647 case PMCS_PRT_DEBUG3: 2648 case PMCS_PRT_DEBUG2: 2649 case PMCS_PRT_DEBUG1: 2650 case PMCS_PRT_DEBUG: 2651 system_log = B_FALSE; 2652 break; 2653 case PMCS_PRT_INFO: 2654 system_log = B_TRUE; 2655 system_log_level = CE_CONT; 2656 break; 2657 case PMCS_PRT_WARN: 2658 system_log = B_TRUE; 2659 system_log_level = CE_NOTE; 2660 break; 2661 case PMCS_PRT_ERR: 2662 system_log = B_TRUE; 2663 system_log_level = CE_WARN; 2664 break; 2665 default: 2666 return; 2667 } 2668 2669 mutex_enter(&pmcs_trace_lock); 2670 gethrestime(&pmcs_tbuf_ptr->timestamp); 2671 ptr = pmcs_tbuf_ptr->buf; 2672 2673 /* 2674 * Store the pertinent PHY and target information if there is any 2675 */ 2676 if (target == NULL) { 2677 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM; 2678 pmcs_tbuf_ptr->target_ua[0] = '\0'; 2679 } else { 2680 pmcs_tbuf_ptr->target_num = target->target_num; 2681 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua, 2682 PMCS_TBUF_UA_MAX_SIZE); 2683 } 2684 2685 if (phyp == NULL) { 2686 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8); 2687 pmcs_tbuf_ptr->phy_path[0] = '\0'; 2688 pmcs_tbuf_ptr->phy_dtype = NOTHING; 2689 } else { 2690 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address, 2691 phyp->sas_address, 8); 2692 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32); 2693 pmcs_tbuf_ptr->phy_dtype = phyp->dtype; 2694 } 2695 2696 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2697 ddi_get_instance(pwp->dip), level); 2698 ptr += strlen(ptr); 2699 
va_start(ap, fmt); 2700 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2701 va_end(ap); 2702 if (written > elem_size - 1) { 2703 /* Indicate truncation */ 2704 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2705 } 2706 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2707 pmcs_tbuf_ptr = pmcs_tbuf; 2708 pmcs_tbuf_wrap = B_TRUE; 2709 pmcs_tbuf_idx = 0; 2710 } else { 2711 ++pmcs_tbuf_ptr; 2712 } 2713 mutex_exit(&pmcs_trace_lock); 2714 2715 /* 2716 * When pmcs_force_syslog in non-zero, everything goes also 2717 * to syslog, at CE_CONT level. 2718 */ 2719 if (pmcs_force_syslog) { 2720 system_log = B_TRUE; 2721 system_log_level = CE_CONT; 2722 } 2723 2724 /* 2725 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2726 * goes to syslog. 2727 */ 2728 if (system_log) { 2729 char local[196]; 2730 2731 switch (system_log_level) { 2732 case CE_CONT: 2733 (void) snprintf(local, sizeof (local), "%sINFO: ", 2734 pmcs_console ? "" : "?"); 2735 break; 2736 case CE_NOTE: 2737 case CE_WARN: 2738 local[0] = 0; 2739 break; 2740 default: 2741 return; 2742 } 2743 2744 ptr = local; 2745 ptr += strlen(local); 2746 (void) snprintf(ptr, (sizeof (local)) - 2747 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2748 ddi_get_instance(pwp->dip)); 2749 ptr += strlen(ptr); 2750 va_start(ap, fmt); 2751 (void) vsnprintf(ptr, 2752 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2753 va_end(ap); 2754 if (level == CE_CONT) { 2755 (void) strlcat(local, "\n", sizeof (local)); 2756 } 2757 cmn_err(system_log_level, local); 2758 } 2759 2760 } 2761 2762 /* 2763 * pmcs_acquire_scratch 2764 * 2765 * If "wait" is true, the caller will wait until it can acquire the scratch. 2766 * This implies the caller needs to be in a context where spinning for an 2767 * indeterminate amount of time is acceptable. 
 */
int
pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
{
	int rval;

	if (!wait) {
		/* Non-blocking: returns 0 if acquired, 1 if already held */
		return (atomic_swap_8(&pwp->scratch_locked, 1));
	}

	/*
	 * Caller will wait for scratch.
	 */
	while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
		drv_usecwait(100);
	}

	return (rval);
}

/*
 * Release the scratch area obtained via pmcs_acquire_scratch().
 */
void
pmcs_release_scratch(pmcs_hw_t *pwp)
{
	pwp->scratch_locked = 0;
}

/*
 * Create and install a named kstat of SAS PHY statistics for each PHY
 * on the iport that does not already have one.  The ks_update callback
 * (pmcs_update_phy_stats) refreshes the counters on demand.
 */
static void
pmcs_create_phy_stats(pmcs_iport_t *iport)
{
	sas_phy_stats_t *ps;
	pmcs_hw_t *pwp;
	pmcs_phy_t *phyp;
	int ndata;
	char ks_name[KSTAT_STRLEN];

	ASSERT(iport != NULL);
	pwp = iport->pwp;
	ASSERT(pwp != NULL);

	mutex_enter(&iport->lock);

	for (phyp = list_head(&iport->phys);
	    phyp != NULL;
	    phyp = list_next(&iport->phys, phyp)) {

		pmcs_lock_phy(phyp);

		if (phyp->phy_stats != NULL) {
			pmcs_unlock_phy(phyp);
			/* We've already created this kstat instance */
			continue;
		}

		ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));

		/* kstat name: <driver>.<hba wwn>.<iport instance>.<phynum> */
		(void) snprintf(ks_name, sizeof (ks_name),
		    "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
		    (longlong_t)pwp->sas_wwns[0],
		    ddi_get_instance(iport->dip), phyp->phynum);

		phyp->phy_stats = kstat_create("pmcs",
		    ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
		    KSTAT_TYPE_NAMED, ndata, 0);

		if (phyp->phy_stats == NULL) {
			/* Non-fatal: continue with the remaining PHYs */
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
			    "%s: Failed to create %s kstats", __func__,
			    ks_name);
			continue;
		}

		ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;

		kstat_named_init(&ps->seconds_since_last_reset,
		    "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_frames,
		    "TxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_frames,
		    "RxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_words,
		    "TxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_words,
		    "RxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->invalid_dword_count,
		    "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->running_disparity_error_count,
		    "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->loss_of_dword_sync_count,
		    "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->phy_reset_problem_count,
		    "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);

		phyp->phy_stats->ks_private = phyp;
		phyp->phy_stats->ks_update = pmcs_update_phy_stats;
		kstat_install(phyp->phy_stats);
		pmcs_unlock_phy(phyp);
	}

	mutex_exit(&iport->lock);
}

/*
 * kstat ks_update callback: refresh the PHY error counters from the
 * chip's diagnostic reports.  Returns DDI_SUCCESS, or DDI_FAILURE if
 * any diagnostic report could not be fetched.
 */
int
pmcs_update_phy_stats(kstat_t *ks, int rw)
{
	int val, ret = DDI_FAILURE;
	pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private;
	pmcs_hw_t *pwp = pptr->pwp;
	sas_phy_stats_t *ps = ks->ks_data;

	_NOTE(ARGUNUSED(rw));
	ASSERT((pptr != NULL) && (pwp != NULL));

	/*
	 * We just want to lock against other invocations of kstat;
	 * we don't need to pmcs_lock_phy() for this.
	 */
	mutex_enter(&pptr->phy_lock);

	/* Get Stats from Chip */
	val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->invalid_dword_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->running_disparity_error_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->phy_reset_problem_count.value.ull = (unsigned long long)val;

	ret = DDI_SUCCESS;
fail:
	mutex_exit(&pptr->phy_lock);
	return (ret);
}

/*
 * Delete the PHY statistics kstat for the iport's primary PHY, if any.
 * Note this only tears down iport->pptr's kstat, not those of the
 * other PHYs on the iport's list.
 */
static void
pmcs_destroy_phy_stats(pmcs_iport_t *iport)
{
	pmcs_phy_t *phyp;

	ASSERT(iport != NULL);
	mutex_enter(&iport->lock);
	phyp = iport->pptr;
	if (phyp == NULL) {
		mutex_exit(&iport->lock);
		return;
	}

	pmcs_lock_phy(phyp);
	if (phyp->phy_stats != NULL) {
		kstat_delete(phyp->phy_stats);
		phyp->phy_stats = NULL;
	}
	pmcs_unlock_phy(phyp);

	mutex_exit(&iport->lock);
}

/*
 * FMA error-handling callback, registered via ddi_fm_handler_register()
 * in pmcs_fm_init().
 */
/*ARGSUSED*/
static int
pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * Register with IO Fault Services according to the capabilities in
 * pwp->fm_capabilities, marking the register access and DMA attributes
 * as error-aware first.
 */
static void
pmcs_fm_init(pmcs_hw_t *pwp)
{
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (pwp->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 */
		ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_setup(pwp->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_register(pwp->dip,
			    pmcs_fm_error_cb, (void *) pwp);
		}
	}
}

/*
 * Undo pmcs_fm_init(): unregister from IO Fault Services in reverse
 * order and restore the default access/DMA attributes.
 */
static void
pmcs_fm_fini(pmcs_hw_t *pwp)
{
	/* Only unregister FMA capabilities if registered */
	if (pwp->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_unregister(pwp->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_teardown(pwp->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(pwp->dip);

		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
}

/*
 * Fabricate per-port WWNs from the system serial number (falling back
 * to the high-resolution clock when no serial number is available),
 * combined with the driver instance number and a 5 in the top nibble
 * (presumably the NAA identifier -- confirm against the SAS spec).
 */
static boolean_t
pmcs_fabricate_wwid(pmcs_hw_t *pwp)
{
	char *cp, c;
	uint64_t adr;
	int i;

	cp = &c;
	/*
	 * NOTE(review): this assumes unsigned long is 64 bits wide (LP64);
	 * on a 32-bit kernel only the low half of adr would be written.
	 */
	(void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);

	if (adr == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: No serial number available to fabricate WWN",
		    __func__);

		adr = (uint64_t)gethrtime();
	}

	adr <<= 8;
	adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
	adr |= (5ULL << 60);

	/* One WWN per port, consecutive values */
	for (i = 0; i < PMCS_MAX_PORTS; i++) {
		pwp->sas_wwns[i] = adr + i;
	}

	return (B_TRUE);
}