/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/scsi/adapters/pmcs/pmcs.h>

#define	PMCS_DRIVER_VERSION	"pmcs HBA device driver"

static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;

/*
 * Non-DDI Compliant stuff
 */
extern char hw_serial[];

/*
 * Global driver data
 */
/* Soft-state anchors for HBA (pmcs_hw_t) and iport (pmcs_iport_t) nodes */
void *pmcs_softc_state = NULL;
void *pmcs_iport_softstate = NULL;

/*
 * Tracing and Logging info
 */
pmcs_tbuf_t *pmcs_tbuf = NULL;		/* trace buffer, shared by instances */
uint32_t pmcs_tbuf_num_elems = 0;	/* number of entries in pmcs_tbuf */
pmcs_tbuf_t *pmcs_tbuf_ptr;		/* current insertion point */
uint32_t pmcs_tbuf_idx = 0;
boolean_t pmcs_tbuf_wrap = B_FALSE;
static kmutex_t pmcs_trace_lock;	/* protects the trace buffer state */

/*
 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
 * will also be sent to system log.
 */
int pmcs_force_syslog = 0;
int pmcs_console = 0;

/*
 * External References
 */
extern int ncpus_online;

/*
 * Local static data
 *
 * These are defaults; most can be overridden by driver.conf properties
 * read in pmcs_attach() (e.g. "pmcs-fwlog", "pmcs-physpeed").
 */
static int fwlog_level = 3;
static int physpeed = PHY_LINK_ALL;
static int phymode = PHY_LM_AUTO;
static int block_mask = 0;
static int phymap_usec = 3 * MICROSEC;		/* phymap stabilization time */
static int iportmap_usec = 2 * MICROSEC;	/* iportmap stabilization time */

#ifdef DEBUG
static int debug_mask = 1;
#else
static int debug_mask = 0;
#endif

#ifdef DISABLE_MSIX
static int disable_msix = 1;
#else
static int disable_msix = 0;
#endif

#ifdef DISABLE_MSI
static int disable_msi = 1;
#else
static int disable_msi = 0;
#endif

static uint16_t maxqdepth = 0xfffe;

/*
 * Local prototypes
 */
static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
static int pmcs_unattach(pmcs_hw_t *);
static int pmcs_iport_unattach(pmcs_iport_t *);
static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
static void pmcs_watchdog(void *);
static int pmcs_setup_intr(pmcs_hw_t *);
static int pmcs_teardown_intr(pmcs_hw_t *);

/* Interrupt service routines (one per vector type, plus combined) */
static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
static uint_t pmcs_general_ix(caddr_t, caddr_t);
static uint_t pmcs_event_ix(caddr_t, caddr_t);
static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
static uint_t pmcs_all_intr(caddr_t, caddr_t);
static int pmcs_quiesce(dev_info_t *dip);
static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);

static void pmcs_create_phy_stats(pmcs_iport_t *);
int pmcs_update_phy_stats(kstat_t *, int);
static void pmcs_destroy_phy_stats(pmcs_iport_t *);

static void pmcs_fm_fini(pmcs_hw_t *pwp);
static void pmcs_fm_init(pmcs_hw_t *pwp);
static int pmcs_fm_error_cb(dev_info_t *dip,
    ddi_fm_error_t *err, const void *impl_data);

/*
 * Local configuration
 * data
 */
static struct dev_ops pmcs_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	pmcs_attach,		/* attach */
	pmcs_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	ddi_power,		/* power management */
	pmcs_quiesce		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	PMCS_DRIVER_VERSION,
	&pmcs_ops,	/* driver ops */
};
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Default DMA attributes; copied per-use in pmcs_attach() */
const ddi_dma_attr_t pmcs_dattr = {
	DMA_ATTR_V0,		/* dma_attr version */
	0x0000000000000000ull,	/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFull,	/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
	0x0000000000000001ull,	/* dma_attr_align */
	0x00000078,		/* dma_attr_burstsizes */
	0x00000001,		/* dma_attr_minxfer */
	0x00000000FFFFFFFFull,	/* dma_attr_maxxfer */
	0x00000000FFFFFFFFull,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/* Little-endian, strictly-ordered register access attributes */
static ddi_device_acc_attr_t rattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * Attach/Detach functions
 */

/*
 * _init(9E): module load entry point.  Initializes HBA and iport soft
 * state, registers with the SCSI HBA framework, installs the module, and
 * sets up the global trace lock.  Each failure path unwinds everything
 * done before it.
 */
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
	if (ret != 0) {
		cmn_err(CE_WARN, "?soft state init failed for pmcs");
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/*
	 * Allocate soft state for iports
	 */
	ret = ddi_soft_state_init(&pmcs_iport_softstate,
	    sizeof (pmcs_iport_t), 2);
	if (ret != 0) {
		cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&pmcs_iport_softstate);
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/* Initialize the global trace lock */
	mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL);

	return (0);
}

/*
 * _fini(9E): module unload entry point.  Undoes _init(9E), and also frees
 * the global trace buffer if one was allocated during attach.
 */
int
_fini(void)
{
	int ret;
	if ((ret = mod_remove(&modlinkage)) != 0) {
		return (ret);
	}
	scsi_hba_fini(&modlinkage);

	/* Free pmcs log buffer and destroy the global lock */
	if (pmcs_tbuf) {
		kmem_free(pmcs_tbuf,
		    pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t));
		pmcs_tbuf = NULL;
	}
	mutex_destroy(&pmcs_trace_lock);

	ddi_soft_state_fini(&pmcs_iport_softstate);
	ddi_soft_state_fini(&pmcs_softc_state);
	return (0);
}

/* _info(9E): report module information via the modlinkage */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Attach an iport (SCSI virtual port) child node.  Allocates and
 * initializes per-iport soft state, configures the iport's PHY list if
 * its unit address is active in the HBA's phymap, creates the target map
 * and kstats, and links the iport onto the parent HBA's iports list.
 */
static int
pmcs_iport_attach(dev_info_t *dip)
{
	pmcs_iport_t	*iport;
	pmcs_hw_t	*pwp;
	scsi_hba_tran_t	*tran;
	void		*ua_priv = NULL;
	char		*iport_ua;
	char		*init_port;
	int		hba_inst;
	int		inst;

	hba_inst = ddi_get_instance(ddi_get_parent(dip));
	inst = ddi_get_instance(dip);

	pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst);
	if (pwp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: iport%d attach invoked with NULL parent (HBA) node)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	/* Refuse to attach while the HBA is being torn down (or is dead) */
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
		return (DDI_FAILURE);
	}

	if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: invoked with NULL unit address, inst (%d)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	if
(ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to alloc soft state for iport %d", inst);
		return (DDI_FAILURE);
	}

	iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get iport soft state");
		goto iport_attach_fail1;
	}

	mutex_init(&iport->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));

	/* Set some data on the iport handle */
	iport->dip = dip;
	iport->pwp = pwp;

	/* Dup the UA into the iport handle (freed in pmcs_iport_unattach) */
	iport->ua = strdup(iport_ua);

	tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
	tran->tran_hba_private = iport;

	list_create(&iport->phys, sizeof (pmcs_phy_t),
	    offsetof(pmcs_phy_t, list_node));

	/*
	 * If our unit address is active in the phymap, configure our
	 * iport's phylist.
	 */
	mutex_enter(&iport->lock);
	ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua);
	if (ua_priv) {
		/* Non-NULL private data indicates the unit address is active */
		iport->ua_state = UA_ACTIVE;
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: failed to "
			    "configure phys on iport handle (0x%p), "
			    " unit address [%s]", __func__,
			    (void *)iport, iport_ua);
			mutex_exit(&iport->lock);
			goto iport_attach_fail2;
		}
	} else {
		iport->ua_state = UA_INACTIVE;
	}
	mutex_exit(&iport->lock);

	/* Allocate string-based soft state pool for targets */
	iport->tgt_sstate = NULL;
	if (ddi_soft_state_bystr_init(&iport->tgt_sstate,
	    sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get iport tgt soft state");
		goto iport_attach_fail2;
	}

	/* Create this iport's target map */
	if (pmcs_iport_tgtmap_create(iport) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to create tgtmap on iport %d", inst);
		goto iport_attach_fail3;
	}

	/* Set up the 'initiator-port' DDI property on this iport */
	init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
	if (pwp->separate_ports) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: separate ports not supported", __func__);
	} else {
		/* Set initiator-port value to the HBA's base WWN */
		(void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1,
		    init_port);
	}
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING,
	    SCSI_ADDR_PROP_INITIATOR_PORT, init_port);
	kmem_free(init_port, PMCS_MAX_UA_SIZE);

	/* Set up a 'num-phys' DDI property for the iport node */
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);

	/* Create kstats for each of the phys in this port */
	pmcs_create_phy_stats(iport);

	/*
	 * Insert this iport handle into our list and set
	 * iports_attached on the HBA node.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	ASSERT(!list_link_active(&iport->list_node));
	list_insert_tail(&pwp->iports, iport);
	pwp->iports_attached = 1;
	pwp->num_iports++;
	rw_exit(&pwp->iports_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
	    "iport%d attached", inst);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* teardown and fail */
iport_attach_fail3:
	ddi_soft_state_bystr_fini(&iport->tgt_sstate);
iport_attach_fail2:
	list_destroy(&iport->phys);
	strfree(iport->ua);
	mutex_destroy(&iport->refcnt_lock);
	cv_destroy(&iport->refcnt_cv);
	mutex_destroy(&iport->lock);
iport_attach_fail1:
	ddi_soft_state_free(pmcs_iport_softstate, inst);
	return (DDI_FAILURE);
}

/*
 * attach(9E) entry point.  Handles DDI_ATTACH for both the HBA node and
 * its iport children (the latter by delegating to pmcs_iport_attach),
 * and DDI_RESUME/DDI_PM_RESUME for the HBA node.
 */
static int
pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t *tran;
	char chiprev, *fwsupport, hw_rev[24], fw_rev[24];
	off_t set3size;
	int inst, i;
	int sm_hba = 1;
	int protocol = 0;
	int num_phys = 0;
	pmcs_hw_t *pwp;
	pmcs_phy_t *phyp;
	uint32_t num_threads;
	char buf[64];

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_PM_RESUME:
	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		/* No DDI_?_RESUME on iport nodes */
		if (scsi_hba_iport_unit_address(dip) != NULL) {
			return (DDI_SUCCESS);
		}
		pwp = TRAN2PMC(tran);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}

		/* Clear the suspended flag and restart the worker taskq */
		mutex_enter(&pwp->lock);
		pwp->suspended = 0;
		if (pwp->tq) {
			ddi_taskq_resume(pwp->tq);
		}
		mutex_exit(&pwp->lock);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * If this is an iport node, invoke iport attach.
	 */
	if (scsi_hba_iport_unit_address(dip) != NULL) {
		return (pmcs_iport_attach(dip));
	}

	/*
	 * From here on is attach for the HBA node
	 */

#ifdef	DEBUG
	/*
	 * Check to see if this unit is to be disabled.  We can't disable
	 * on a per-iport node.  It's either the entire HBA or nothing.
	 */
	(void) snprintf(buf, sizeof (buf),
	    "disable-instance-%d", ddi_get_instance(dip));
	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) {
		cmn_err(CE_NOTE, "pmcs%d: disabled by configuration",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}
#endif

	/*
	 * Allocate softstate
	 */
	inst = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst);
		return (DDI_FAILURE);
	}

	pwp = ddi_get_soft_state(pmcs_softc_state, inst);
	if (pwp == NULL) {
		cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}
	pwp->dip = dip;
	STAILQ_INIT(&pwp->dq);
	STAILQ_INIT(&pwp->cq);
	STAILQ_INIT(&pwp->wf);
	STAILQ_INIT(&pwp->pf);
	/*
	 * Create the list for iports
	 */
	list_create(&pwp->iports, sizeof (pmcs_iport_t),
	    offsetof(pmcs_iport_t, list_node));

	pwp->state = STATE_PROBING;

	/*
	 * Get driver.conf properties
	 */
	pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask",
	    debug_mask);
	pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask",
	    block_mask);
	pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed);
	pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode);
	pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level);
	if (pwp->fwlog > PMCS_FWLOG_MAX) {
		pwp->fwlog = PMCS_FWLOG_MAX;
	}

	/* Allocate the global trace buffer once, on first HBA attach */
	mutex_enter(&pmcs_trace_lock);
	if (pmcs_tbuf == NULL) {
		/* Allocate trace buffer */
		pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems",
		    PMCS_TBUF_NUM_ELEMS_DEF);
		if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) ||
		    (pmcs_tbuf_num_elems == 0)) {
			pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF;
		}

		pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems *
		    sizeof (pmcs_tbuf_t), KM_SLEEP);
		pmcs_tbuf_ptr = pmcs_tbuf;
		pmcs_tbuf_idx = 0;
	}
	mutex_exit(&pmcs_trace_lock);

	disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix",
	    disable_msix);
	disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi",
	    disable_msi);
	maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth);
	pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0);
	/* "disable update" is only honored when a forced update isn't set */
	if (pwp->fw_force_update == 0) {
		pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "pmcs-fw-disable-update", 0);
	}
	pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries",
	    PMCS_NQENTRY);

	/*
	 * Initialize FMA
	 */
	pwp->dev_acc_attr = pwp->reg_acc_attr = rattr;
	pwp->iqp_dma_attr = pwp->oqp_dma_attr =
	    pwp->regdump_dma_attr = pwp->cip_dma_attr =
	    pwp->fwlog_dma_attr = pmcs_dattr;
	pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	pmcs_fm_init(pwp);

	/*
	 * Map registers
	 */
	if (pci_config_setup(dip, &pwp->pci_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
		    "pci config setup failed");
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	/*
	 * Get the size of register set 3.
	 */
	if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get size of register set %d", PMCS_REGSET_3);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	/*
	 * Map registers
	 */
	pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map Message Unit registers");
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map TOP registers");
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map GSM registers");
		ddi_regs_map_free(&pwp->top_acc_handle);
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map MPI registers");
		ddi_regs_map_free(&pwp->top_acc_handle);
		ddi_regs_map_free(&pwp->gsm_acc_handle);
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}
	pwp->mpibar =
	    (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size;

	/*
	 * Make sure we can support this card.
	 */
	pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION);

	switch (pwp->chiprev) {
	case PMCS_PM8001_REV_A:
	case PMCS_PM8001_REV_B:
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "Rev A/B Card no longer supported");
		goto failure;
	case PMCS_PM8001_REV_C:
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "Unknown chip revision (%d)", pwp->chiprev);
		goto failure;
	}

	/*
	 * Allocate DMA addressable area for Inbound and Outbound Queue indices
	 * that the chip needs to access plus a space for scratch usage
	 */
	pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
	if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls,
	    &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip,
	    &pwp->ciaddr) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to setup DMA for index/scratch");
		goto failure;
	}

	bzero(pwp->cip, ptob(1));
	pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE];
	pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE;

	/*
	 * Allocate DMA S/G list chunks
	 */
	(void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES);

	/*
	 * Allocate a DMA addressable area for the firmware log (if needed)
	 */
	if (pwp->fwlog) {
		/*
		 * Align to event log header and entry size
		 */
		pwp->fwlog_dma_attr.dma_attr_align = 32;
		if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr,
		    &pwp->fwlog_acchdl,
		    &pwp->fwlog_hndl, PMCS_FWLOG_SIZE,
		    (caddr_t *)&pwp->fwlogp,
		    &pwp->fwaddr) == B_FALSE) {
			/* fwlog allocation failure is non-fatal */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for fwlog area");
			pwp->fwlog = 0;
		} else {
			bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
		}
	}

	if (pwp->flash_chunk_addr == NULL) {
		pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE;
		if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr,
		    &pwp->regdump_acchdl,
		    &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE,
		    (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) ==
		    B_FALSE) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for register dump area");
			goto failure;
		}
		bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE);
	}

	/*
	 * More bits of local initialization...
	 */
	pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0);
	if (pwp->tq == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to create worker taskq");
		goto failure;
	}

	/*
	 * Cache of structures for dealing with I/O completion callbacks.
	 */
	(void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst);
	pwp->iocomp_cb_cache = kmem_cache_create(buf,
	    sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Cache of PHY structures
	 */
	(void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst);
	pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8,
	    pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp,
	    NULL, 0);

	/*
	 * Allocate space for the I/O completion threads
	 */
	num_threads = ncpus_online;
	if (num_threads > PMCS_MAX_CQ_THREADS) {
		num_threads = PMCS_MAX_CQ_THREADS;
	}

	pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) *
	    num_threads, KM_SLEEP);
	pwp->cq_info.cq_threads = num_threads;
	pwp->cq_info.cq_next_disp_thr = 0;
	pwp->cq_info.cq_stop = B_FALSE;

	/*
	 * Set the quantum value in clock ticks for the I/O interrupt
	 * coalescing timer.
	 */
	pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS);

	/*
	 * We have a delicate dance here. We need to set up
	 * interrupts so we know how to set up some OQC
	 * tables. However, while we're setting up table
	 * access, we may need to flash new firmware and
	 * reset the card, which will take some finessing.
	 */

	/*
	 * Set up interrupts here.
	 */
	switch (pmcs_setup_intr(pwp)) {
	case 0:
		break;
	case EIO:
		pwp->stuck = 1;
		/* FALLTHROUGH */
	default:
		goto failure;
	}

	/*
	 * Set these up now because they are used to initialize the OQC tables.
	 *
	 * If we have MSI or MSI-X interrupts set up and we have enough
	 * vectors for each OQ, the Outbound Queue vectors can all be the
	 * same as the appropriate interrupt routine will have been called
	 * and the doorbell register automatically cleared.
	 * This keeps us from having to check the Outbound Doorbell register
	 * when the routines for these interrupts are called.
	 *
	 * If we have Legacy INT-X interrupts set up or we didn't have enough
	 * MSI/MSI-X vectors to uniquely identify each OQ, we point these
	 * vectors to the bits we would like to have set in the Outbound
	 * Doorbell register because pmcs_all_intr will read the doorbell
	 * register to find out why we have an interrupt and write the
	 * corresponding 'clear' bit for that interrupt.
	 */

	switch (pwp->intr_cnt) {
	case 1:
		/*
		 * Only one vector, so we must check all OQs for MSI.  For
		 * INT-X, there's only one vector anyway, so we can just
		 * use the outbound queue bits to keep from having to
		 * check each queue for each interrupt.
		 */
		if (pwp->int_type == PMCS_INT_FIXED) {
			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
		} else {
			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE;
		}
		break;
	case 2:
		/* With 2, we can at least isolate IODONE */
		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL;
		break;
	case 4:
		/* With 4 vectors, everybody gets one */
		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
		break;
	}

	/*
	 * Do the first part of setup
	 */
	if (pmcs_setup(pwp)) {
		goto failure;
	}
	pmcs_report_fwversion(pwp);

	/*
	 * Now do some additional allocations based upon information
	 * gathered during MPI setup.
	 */
	pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP);
	ASSERT(pwp->nphy < SAS2_PHYNUM_MAX);
	phyp = pwp->root_phys;
	for (i = 0; i < pwp->nphy; i++) {
		/* Chain root phys together via their sibling pointers */
		if (i < pwp->nphy-1) {
			phyp->sibling = (phyp + 1);
		}
		mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(pwp->intr_pri));
		phyp->phynum = i & SAS2_PHYNUM_MASK;
		pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path));
		phyp->pwp = pwp;
		phyp->device_id = PMCS_INVALID_DEVICE_ID;
		phyp++;
	}

	pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP);
	for (i = 0; i < pwp->max_cmd - 1; i++) {
		pmcwork_t *pwrk = &pwp->work[i];
		mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL);
		STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);

	}
	pwp->targets = (pmcs_xscsi_t **)
	    kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP);

	pwp->iqpt = (pmcs_iqp_trace_t *)
	    kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP);
	pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP);
	pwp->iqpt->curpos = pwp->iqpt->head;
	pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;

	/*
	 * Start MPI communication.
	 */
	if (pmcs_start_mpi(pwp)) {
		if (pmcs_soft_reset(pwp, B_FALSE)) {
			goto failure;
		}
	}

	/*
	 * Do some initial acceptance tests.
	 * This tests interrupts and queues.
	 */
	if (pmcs_echo_test(pwp)) {
		goto failure;
	}

	/* Read VPD - if it exists */
	if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Unable to read VPD: "
		    "attempting to fabricate", __func__);
		/*
		 * When we release, this must goto failure and the call
		 * to pmcs_fabricate_wwid is removed.
		 */
		/* goto failure; */
		if (!pmcs_fabricate_wwid(pwp)) {
			goto failure;
		}
	}

	/*
	 * We're now officially running
	 */
	pwp->state = STATE_RUNNING;

	/*
	 * Check firmware versions and load new firmware
	 * if needed and reset.
	 */
	if (pmcs_firmware_update(pwp)) {
		pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
		    "%s: Firmware update failed", __func__);
		goto failure;
	}

	/*
	 * Create completion threads.
	 */
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		pwp->cq_info.cq_thr_info[i].cq_pwp = pwp;
		pwp->cq_info.cq_thr_info[i].cq_thread =
		    thread_create(NULL, 0, pmcs_scsa_cq_run,
		    &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri);
	}

	/*
	 * Create one thread to deal with the updating of the interrupt
	 * coalescing timer.
	 */
	pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal,
	    pwp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Kick off the watchdog
	 */
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));
	/*
	 * Do the SCSI attachment code (before starting phys)
	 */
	if (pmcs_scsa_init(pwp, &pmcs_dattr)) {
		goto failure;
	}
	pwp->hba_attached = 1;

	/*
	 * Initialize the rwlock for the iport elements.
	 */
	rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL);

	/* Check all acc & dma handles allocated in attach */
	if (pmcs_check_acc_dma_handle(pwp)) {
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		goto failure;
	}

	/*
	 * Create the phymap for this HBA instance
	 */
	if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL,
	    pwp, pmcs_phymap_activate, pmcs_phymap_deactivate,
	    &pwp->hss_phymap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: pmcs%d phymap_create failed", __func__, inst);
		goto failure;
	}
	ASSERT(pwp->hss_phymap);

	/*
	 * Create the iportmap for this HBA instance
	 */
	if (scsi_hba_iportmap_create(dip, iportmap_usec,
	    &pwp->hss_iportmap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: pmcs%d iportmap_create failed", __func__, inst);
		goto failure;
	}
	ASSERT(pwp->hss_iportmap);

	/*
	 * Start the PHYs.
	 */
	if (pmcs_start_phys(pwp)) {
		goto failure;
	}

	/*
	 * From this point on, we can't fail.
	 */
	ddi_report_dev(dip);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
	    &sm_hba);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
	    pmcs_driver_rev);

	/* SM-HBA */
	chiprev = 'A' + pwp->chiprev;
	/*
	 * NOTE(review): &chiprev points at a single char that is not
	 * NUL-terminated; "%s" may read past it (snprintf's size of 2
	 * bounds the write, not the read).  "%c" would be safer — confirm.
	 */
	(void) snprintf(hw_rev, 2, "%s", &chiprev);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
	    hw_rev);

	/* SM-HBA */
	switch (PMCS_FW_TYPE(pwp)) {
	case PMCS_FW_TYPE_RELEASED:
		fwsupport = "Released";
		break;
	case PMCS_FW_TYPE_DEVELOPMENT:
		fwsupport = "Development";
		break;
	case PMCS_FW_TYPE_ALPHA:
		fwsupport = "Alpha";
		break;
	case PMCS_FW_TYPE_BETA:
		fwsupport = "Beta";
		break;
	default:
		fwsupport = "Special";
		break;
	}
	(void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
	    PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
	    fwsupport);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
	    fw_rev);

	/* SM-HBA */
	num_phys = pwp->nphy;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
	    &num_phys);

	/* SM-HBA */
	protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
	    &protocol);

	return (DDI_SUCCESS);

failure:
	/* Common unwind path: pmcs_unattach frees whatever was set up */
	if (pmcs_unattach(pwp)) {
		pwp->stuck = 1;
	}
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point for both HBA and iport nodes.  Handles
 * DDI_DETACH and DDI_SUSPEND/DDI_PM_SUSPEND.
 */
int
pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst = ddi_get_instance(dip);
	pmcs_iport_t	*iport = NULL;
	pmcs_hw_t	*pwp = NULL;
	scsi_hba_tran_t	*tran;

	if (scsi_hba_iport_unit_address(dip) != NULL) {
		/* iport node */
		iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
		ASSERT(iport);
		if (iport == NULL) {
			return (DDI_FAILURE);
		}
		pwp =
		    iport->pwp;
	} else {
		/* hba node */
		pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst);
		ASSERT(pwp);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}
	}

	switch (cmd) {
	case DDI_DETACH:
		if (iport) {
			/* iport detach */
			if (pmcs_iport_unattach(iport)) {
				return (DDI_FAILURE);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "iport%d detached", inst);
			return (DDI_SUCCESS);
		} else {
			/* HBA detach */
			if (pmcs_unattach(pwp)) {
				return (DDI_FAILURE);
			}
			return (DDI_SUCCESS);
		}

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		/* No DDI_SUSPEND on iport nodes */
		if (iport) {
			return (DDI_SUCCESS);
		}

		if (pwp->stuck) {
			return (DDI_FAILURE);
		}
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}

		pwp = TRAN2PMC(tran);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}
		/* Pause the worker taskq and mark the instance suspended */
		mutex_enter(&pwp->lock);
		if (pwp->tq) {
			ddi_taskq_suspend(pwp->tq);
		}
		pwp->suspended = 1;
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending");
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Tear down an iport.  Fails if the iport still has configured targets
 * or if its unit address is still active in the phymap; otherwise removes
 * it from the HBA's list, drains its PHY list and outstanding references,
 * then frees all of its resources and soft state.
 */
static int
pmcs_iport_unattach(pmcs_iport_t *iport)
{
	pmcs_hw_t	*pwp = iport->pwp;

	/*
	 * First, check if there are still any configured targets on this
	 * iport.  If so, we fail detach.
	 */
	if (pmcs_iport_has_targets(pwp, iport)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: iport has targets (luns)",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/*
	 * Remove this iport from our list if it is inactive in the phymap.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	mutex_enter(&iport->lock);

	if (iport->ua_state == UA_ACTIVE) {
		mutex_exit(&iport->lock);
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: "
		    "iport unit address active in phymap",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/* If it's our only iport, clear iports_attached */
	ASSERT(pwp->num_iports >= 1);
	if (--pwp->num_iports == 0) {
		pwp->iports_attached = 0;
	}

	ASSERT(list_link_active(&iport->list_node));
	list_remove(&pwp->iports, iport);
	rw_exit(&pwp->iports_lock);

	/*
	 * We have removed the iport handle from the HBA's iports list,
	 * there will be no new references to it. Two things must be
	 * guarded against here.  First, we could have PHY up events,
	 * adding themselves to the iport->phys list and grabbing ref's
	 * on our iport handle.  Second, we could have existing references
	 * to this iport handle from a point in time prior to the list
	 * removal above.
	 *
	 * So first, destroy the phys list. Remove any phys that have snuck
	 * in after the phymap deactivate, dropping the refcnt accordingly.
	 * If these PHYs are still up if and when the phymap reactivates
	 * (i.e. when this iport reattaches), we'll populate the list with
	 * them and bump the refcnt back up.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	ASSERT(list_is_empty(&iport->phys));
	list_destroy(&iport->phys);
	mutex_exit(&iport->lock);

	/*
	 * Second, wait for any other references to this iport to be
	 * dropped, then continue teardown.
	 */
	mutex_enter(&iport->refcnt_lock);
	while (iport->refcnt != 0) {
		cv_wait(&iport->refcnt_cv, &iport->refcnt_lock);
	}
	mutex_exit(&iport->refcnt_lock);

	/* Delete kstats */
	pmcs_destroy_phy_stats(iport);

	/* Destroy the iport target map */
	if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) {
		return (DDI_FAILURE);
	}

	/* Free the tgt soft state */
	if (iport->tgt_sstate != NULL) {
		ddi_soft_state_bystr_fini(&iport->tgt_sstate);
	}

	/* Free our unit address string */
	strfree(iport->ua);

	/* Finish teardown and free the softstate */
	mutex_destroy(&iport->refcnt_lock);
	ASSERT(iport->refcnt == 0);
	cv_destroy(&iport->refcnt_cv);
	mutex_destroy(&iport->lock);
	ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip));

	return (DDI_SUCCESS);
}

/*
 * Tear down an HBA instance.  Called both from the pmcs_attach failure
 * path and from DDI_DETACH of the HBA node, so it must cope with
 * partially-initialized state (hence the locks_initted check).
 */
static int
pmcs_unattach(pmcs_hw_t *pwp)
{
	int		i;
	enum pwpstate	curstate;
	pmcs_cq_thr_info_t *cqti;

	/*
	 * Tear down the interrupt infrastructure.
	 */
	if (pmcs_teardown_intr(pwp)) {
		pwp->stuck = 1;
	}
	pwp->intr_cnt = 0;

	/*
	 * Grab a lock, if initted, to set state.
	 */
	if (pwp->locks_initted) {
		mutex_enter(&pwp->lock);
		if (pwp->state != STATE_DEAD) {
			pwp->state = STATE_UNPROBING;
		}
		curstate = pwp->state;
		mutex_exit(&pwp->lock);

		/*
		 * Stop the I/O completion threads.
1264 */ 1265 mutex_enter(&pwp->cq_lock); 1266 pwp->cq_info.cq_stop = B_TRUE; 1267 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1268 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1269 cqti = &pwp->cq_info.cq_thr_info[i]; 1270 mutex_enter(&cqti->cq_thr_lock); 1271 cv_signal(&cqti->cq_cv); 1272 mutex_exit(&cqti->cq_thr_lock); 1273 mutex_exit(&pwp->cq_lock); 1274 thread_join(cqti->cq_thread->t_did); 1275 mutex_enter(&pwp->cq_lock); 1276 } 1277 } 1278 mutex_exit(&pwp->cq_lock); 1279 1280 /* 1281 * Stop the interrupt coalescing timer thread 1282 */ 1283 if (pwp->ict_thread) { 1284 mutex_enter(&pwp->ict_lock); 1285 pwp->io_intr_coal.stop_thread = B_TRUE; 1286 cv_signal(&pwp->ict_cv); 1287 mutex_exit(&pwp->ict_lock); 1288 thread_join(pwp->ict_thread->t_did); 1289 } 1290 } else { 1291 if (pwp->state != STATE_DEAD) { 1292 pwp->state = STATE_UNPROBING; 1293 } 1294 curstate = pwp->state; 1295 } 1296 1297 if (&pwp->iports != NULL) { 1298 /* Destroy the iports lock */ 1299 rw_destroy(&pwp->iports_lock); 1300 /* Destroy the iports list */ 1301 ASSERT(list_is_empty(&pwp->iports)); 1302 list_destroy(&pwp->iports); 1303 } 1304 1305 if (pwp->hss_iportmap != NULL) { 1306 /* Destroy the iportmap */ 1307 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1308 } 1309 1310 if (pwp->hss_phymap != NULL) { 1311 /* Destroy the phymap */ 1312 sas_phymap_destroy(pwp->hss_phymap); 1313 } 1314 1315 /* 1316 * Make sure that any pending watchdog won't 1317 * be called from this point on out. 1318 */ 1319 (void) untimeout(pwp->wdhandle); 1320 /* 1321 * After the above action, the watchdog 1322 * timer that starts up the worker task 1323 * may trigger but will exit immediately 1324 * on triggering. 1325 * 1326 * Now that this is done, we can destroy 1327 * the task queue, which will wait if we're 1328 * running something on it. 
1329 */ 1330 if (pwp->tq) { 1331 ddi_taskq_destroy(pwp->tq); 1332 pwp->tq = NULL; 1333 } 1334 1335 pmcs_fm_fini(pwp); 1336 1337 if (pwp->hba_attached) { 1338 (void) scsi_hba_detach(pwp->dip); 1339 pwp->hba_attached = 0; 1340 } 1341 1342 /* 1343 * If the chip hasn't been marked dead, shut it down now 1344 * to bring it back to a known state without attempting 1345 * a soft reset. 1346 */ 1347 if (curstate != STATE_DEAD && pwp->locks_initted) { 1348 /* 1349 * De-register all registered devices 1350 */ 1351 pmcs_deregister_devices(pwp, pwp->root_phys); 1352 1353 /* 1354 * Stop all the phys. 1355 */ 1356 pmcs_stop_phys(pwp); 1357 1358 /* 1359 * Shut Down Message Passing 1360 */ 1361 (void) pmcs_stop_mpi(pwp); 1362 1363 /* 1364 * Reset chip 1365 */ 1366 (void) pmcs_soft_reset(pwp, B_FALSE); 1367 } 1368 1369 /* 1370 * Turn off interrupts on the chip 1371 */ 1372 if (pwp->mpi_acc_handle) { 1373 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1374 } 1375 1376 /* Destroy pwp's lock */ 1377 if (pwp->locks_initted) { 1378 mutex_destroy(&pwp->lock); 1379 mutex_destroy(&pwp->dma_lock); 1380 mutex_destroy(&pwp->axil_lock); 1381 mutex_destroy(&pwp->cq_lock); 1382 mutex_destroy(&pwp->config_lock); 1383 mutex_destroy(&pwp->ict_lock); 1384 mutex_destroy(&pwp->wfree_lock); 1385 mutex_destroy(&pwp->pfree_lock); 1386 mutex_destroy(&pwp->dead_phylist_lock); 1387 #ifdef DEBUG 1388 mutex_destroy(&pwp->dbglock); 1389 #endif 1390 cv_destroy(&pwp->ict_cv); 1391 cv_destroy(&pwp->drain_cv); 1392 pwp->locks_initted = 0; 1393 } 1394 1395 /* 1396 * Free DMA handles and associated consistent memory 1397 */ 1398 if (pwp->regdump_hndl) { 1399 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1400 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1401 "Condition check failed " 1402 "at %s():%d", __func__, __LINE__); 1403 } 1404 ddi_dma_free_handle(&pwp->regdump_hndl); 1405 ddi_dma_mem_free(&pwp->regdump_acchdl); 1406 pwp->regdump_hndl = 0; 1407 } 1408 if (pwp->fwlog_hndl) { 1409 if 
(ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1410 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1411 "Condition check failed " 1412 "at %s():%d", __func__, __LINE__); 1413 } 1414 ddi_dma_free_handle(&pwp->fwlog_hndl); 1415 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1416 pwp->fwlog_hndl = 0; 1417 } 1418 if (pwp->cip_handles) { 1419 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1420 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1421 "Condition check failed " 1422 "at %s():%d", __func__, __LINE__); 1423 } 1424 ddi_dma_free_handle(&pwp->cip_handles); 1425 ddi_dma_mem_free(&pwp->cip_acchdls); 1426 pwp->cip_handles = 0; 1427 } 1428 for (i = 0; i < PMCS_NOQ; i++) { 1429 if (pwp->oqp_handles[i]) { 1430 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1431 DDI_SUCCESS) { 1432 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1433 "Condition check failed at %s():%d", 1434 __func__, __LINE__); 1435 } 1436 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1437 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1438 pwp->oqp_handles[i] = 0; 1439 } 1440 } 1441 for (i = 0; i < PMCS_NIQ; i++) { 1442 if (pwp->iqp_handles[i]) { 1443 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1444 DDI_SUCCESS) { 1445 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1446 "Condition check failed at %s():%d", 1447 __func__, __LINE__); 1448 } 1449 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1450 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1451 pwp->iqp_handles[i] = 0; 1452 } 1453 } 1454 1455 pmcs_free_dma_chunklist(pwp); 1456 1457 /* 1458 * Unmap registers and destroy access handles 1459 */ 1460 if (pwp->mpi_acc_handle) { 1461 ddi_regs_map_free(&pwp->mpi_acc_handle); 1462 pwp->mpi_acc_handle = 0; 1463 } 1464 if (pwp->top_acc_handle) { 1465 ddi_regs_map_free(&pwp->top_acc_handle); 1466 pwp->top_acc_handle = 0; 1467 } 1468 if (pwp->gsm_acc_handle) { 1469 ddi_regs_map_free(&pwp->gsm_acc_handle); 1470 pwp->gsm_acc_handle = 0; 1471 } 1472 if (pwp->msg_acc_handle) { 1473 ddi_regs_map_free(&pwp->msg_acc_handle); 1474 
pwp->msg_acc_handle = 0; 1475 } 1476 if (pwp->pci_acc_handle) { 1477 pci_config_teardown(&pwp->pci_acc_handle); 1478 pwp->pci_acc_handle = 0; 1479 } 1480 1481 /* 1482 * Do memory allocation cleanup. 1483 */ 1484 while (pwp->dma_freelist) { 1485 pmcs_dmachunk_t *this = pwp->dma_freelist; 1486 pwp->dma_freelist = this->nxt; 1487 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1488 } 1489 1490 /* 1491 * Free pools 1492 */ 1493 if (pwp->iocomp_cb_cache) { 1494 kmem_cache_destroy(pwp->iocomp_cb_cache); 1495 } 1496 1497 /* 1498 * Free all PHYs (at level > 0), then free the cache 1499 */ 1500 pmcs_free_all_phys(pwp, pwp->root_phys); 1501 if (pwp->phy_cache) { 1502 kmem_cache_destroy(pwp->phy_cache); 1503 } 1504 1505 /* 1506 * Free root PHYs 1507 */ 1508 if (pwp->root_phys) { 1509 pmcs_phy_t *phyp = pwp->root_phys; 1510 for (i = 0; i < pwp->nphy; i++) { 1511 mutex_destroy(&phyp->phy_lock); 1512 phyp = phyp->sibling; 1513 } 1514 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1515 pwp->root_phys = NULL; 1516 pwp->nphy = 0; 1517 } 1518 1519 /* Free the targets list */ 1520 if (pwp->targets) { 1521 kmem_free(pwp->targets, 1522 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1523 } 1524 1525 /* 1526 * Free work structures 1527 */ 1528 1529 if (pwp->work && pwp->max_cmd) { 1530 for (i = 0; i < pwp->max_cmd - 1; i++) { 1531 pmcwork_t *pwrk = &pwp->work[i]; 1532 mutex_destroy(&pwrk->lock); 1533 cv_destroy(&pwrk->sleep_cv); 1534 } 1535 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1536 pwp->work = NULL; 1537 pwp->max_cmd = 0; 1538 } 1539 1540 /* 1541 * Do last property and SCSA cleanup 1542 */ 1543 if (pwp->tran) { 1544 scsi_hba_tran_free(pwp->tran); 1545 pwp->tran = NULL; 1546 } 1547 if (pwp->reset_notify_listf) { 1548 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1549 pwp->reset_notify_listf = NULL; 1550 } 1551 ddi_prop_remove_all(pwp->dip); 1552 if (pwp->stuck) { 1553 return (-1); 1554 } 1555 1556 /* Free register dump area if allocated */ 1557 if 
(pwp->regdumpp) { 1558 kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE); 1559 pwp->regdumpp = NULL; 1560 } 1561 if (pwp->iqpt && pwp->iqpt->head) { 1562 kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE); 1563 pwp->iqpt->head = pwp->iqpt->curpos = NULL; 1564 } 1565 if (pwp->iqpt) { 1566 kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t)); 1567 pwp->iqpt = NULL; 1568 } 1569 1570 ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip)); 1571 return (0); 1572 } 1573 1574 /* 1575 * quiesce (9E) entry point 1576 * 1577 * This function is called when the system is single-threaded at high PIL 1578 * with preemption disabled. Therefore, the function must not block/wait/sleep. 1579 * 1580 * Returns DDI_SUCCESS or DDI_FAILURE. 1581 * 1582 */ 1583 static int 1584 pmcs_quiesce(dev_info_t *dip) 1585 { 1586 pmcs_hw_t *pwp; 1587 scsi_hba_tran_t *tran; 1588 1589 if ((tran = ddi_get_driver_private(dip)) == NULL) 1590 return (DDI_SUCCESS); 1591 1592 /* No quiesce necessary on a per-iport basis */ 1593 if (scsi_hba_iport_unit_address(dip) != NULL) { 1594 return (DDI_SUCCESS); 1595 } 1596 1597 if ((pwp = TRAN2PMC(tran)) == NULL) 1598 return (DDI_SUCCESS); 1599 1600 /* Stop MPI & Reset chip (no need to re-initialize) */ 1601 (void) pmcs_stop_mpi(pwp); 1602 (void) pmcs_soft_reset(pwp, B_TRUE); 1603 1604 return (DDI_SUCCESS); 1605 } 1606 1607 /* 1608 * Called with xp->statlock and PHY lock and scratch acquired. 1609 */ 1610 static int 1611 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp) 1612 { 1613 ata_identify_t *ati; 1614 int result, i; 1615 pmcs_phy_t *pptr; 1616 uint16_t *a; 1617 union { 1618 uint8_t nsa[8]; 1619 uint16_t nsb[4]; 1620 } u; 1621 1622 /* 1623 * Safe defaults - use only if this target is brand new (i.e. 
doesn't 1624 * already have these settings configured) 1625 */ 1626 if (xp->capacity == 0) { 1627 xp->capacity = (uint64_t)-1; 1628 xp->ca = 1; 1629 xp->qdepth = 1; 1630 xp->pio = 1; 1631 } 1632 1633 pptr = xp->phy; 1634 1635 /* 1636 * We only try and issue an IDENTIFY for first level 1637 * (direct attached) devices. We don't try and 1638 * set other quirks here (this will happen later, 1639 * if the device is fully configured) 1640 */ 1641 if (pptr->level) { 1642 return (0); 1643 } 1644 1645 mutex_exit(&xp->statlock); 1646 result = pmcs_sata_identify(pwp, pptr); 1647 mutex_enter(&xp->statlock); 1648 1649 if (result) { 1650 return (result); 1651 } 1652 ati = pwp->scratch; 1653 a = &ati->word108; 1654 for (i = 0; i < 4; i++) { 1655 u.nsb[i] = ddi_swap16(*a++); 1656 } 1657 1658 /* 1659 * Check the returned data for being a valid (NAA=5) WWN. 1660 * If so, use that and override the SAS address we were 1661 * given at Link Up time. 1662 */ 1663 if ((u.nsa[0] >> 4) == 5) { 1664 (void) memcpy(pptr->sas_address, u.nsa, 8); 1665 } 1666 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 1667 "%s: %s has SAS ADDRESS " SAS_ADDR_FMT, 1668 __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address)); 1669 return (0); 1670 } 1671 1672 /* 1673 * Called with PHY lock and target statlock held and scratch acquired 1674 */ 1675 static boolean_t 1676 pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target) 1677 { 1678 ASSERT(target != NULL); 1679 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p", 1680 __func__, (void *) target); 1681 1682 switch (target->phy->dtype) { 1683 case SATA: 1684 if (pmcs_add_sata_device(pwp, target) != 0) { 1685 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy, 1686 target, "%s: add_sata_device failed for tgt 0x%p", 1687 __func__, (void *) target); 1688 return (B_FALSE); 1689 } 1690 break; 1691 case SAS: 1692 target->qdepth = maxqdepth; 1693 break; 1694 case EXPANDER: 1695 target->qdepth = 1; 1696 break; 1697 } 1698 1699 target->new = 0; 1700 
target->assigned = 1; 1701 target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL; 1702 target->dtype = target->phy->dtype; 1703 1704 /* 1705 * Set the PHY's config stop time to 0. This is one of the final 1706 * stops along the config path, so we're indicating that we 1707 * successfully configured the PHY. 1708 */ 1709 target->phy->config_stop = 0; 1710 1711 return (B_TRUE); 1712 } 1713 1714 void 1715 pmcs_worker(void *arg) 1716 { 1717 pmcs_hw_t *pwp = arg; 1718 ulong_t work_flags; 1719 1720 DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t, 1721 pwp->config_changed); 1722 1723 if (pwp->state != STATE_RUNNING) { 1724 return; 1725 } 1726 1727 work_flags = atomic_swap_ulong(&pwp->work_flags, 0); 1728 1729 if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) { 1730 pmcs_ack_events(pwp); 1731 } 1732 1733 if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) { 1734 mutex_enter(&pwp->lock); 1735 pmcs_spinup_release(pwp, NULL); 1736 mutex_exit(&pwp->lock); 1737 } 1738 1739 if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) { 1740 pmcs_ssp_event_recovery(pwp); 1741 } 1742 1743 if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) { 1744 pmcs_dev_state_recovery(pwp, NULL); 1745 } 1746 1747 if (work_flags & PMCS_WORK_FLAG_DISCOVER) { 1748 pmcs_discover(pwp); 1749 } 1750 1751 if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) { 1752 if (pmcs_abort_handler(pwp)) { 1753 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 1754 } 1755 } 1756 1757 if (work_flags & PMCS_WORK_FLAG_SATA_RUN) { 1758 pmcs_sata_work(pwp); 1759 } 1760 1761 if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) { 1762 pmcs_scsa_wq_run(pwp); 1763 mutex_enter(&pwp->lock); 1764 PMCS_CQ_RUN(pwp); 1765 mutex_exit(&pwp->lock); 1766 } 1767 1768 if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) { 1769 if (pmcs_add_more_chunks(pwp, 1770 ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) { 1771 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 1772 } else { 1773 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1774 } 1775 } 1776 } 1777 1778 static int 1779 
pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize) 1780 { 1781 pmcs_dmachunk_t *dc; 1782 unsigned long dl; 1783 pmcs_chunk_t *pchunk = NULL; 1784 1785 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 1786 1787 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP); 1788 if (pchunk == NULL) { 1789 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1790 "Not enough memory for DMA chunks"); 1791 return (-1); 1792 } 1793 1794 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle, 1795 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp, 1796 &pchunk->dma_addr) == B_FALSE) { 1797 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1798 "Failed to setup DMA for chunks"); 1799 kmem_free(pchunk, sizeof (pmcs_chunk_t)); 1800 return (-1); 1801 } 1802 1803 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) || 1804 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) { 1805 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED); 1806 return (-1); 1807 } 1808 1809 bzero(pchunk->addrp, nsize); 1810 dc = NULL; 1811 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) { 1812 pmcs_dmachunk_t *tmp; 1813 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP); 1814 tmp->nxt = dc; 1815 dc = tmp; 1816 } 1817 mutex_enter(&pwp->dma_lock); 1818 pmcs_idma_chunks(pwp, dc, pchunk, nsize); 1819 pwp->nchunks++; 1820 mutex_exit(&pwp->dma_lock); 1821 return (0); 1822 } 1823 1824 1825 static void 1826 pmcs_check_commands(pmcs_hw_t *pwp) 1827 { 1828 pmcs_cmd_t *sp; 1829 size_t amt; 1830 char path[32]; 1831 pmcwork_t *pwrk; 1832 pmcs_xscsi_t *target; 1833 pmcs_phy_t *phyp; 1834 1835 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) { 1836 mutex_enter(&pwrk->lock); 1837 1838 /* 1839 * If the command isn't active, we can't be timing it still. 1840 * Active means the tag is not free and the state is "on chip". 1841 */ 1842 if (!PMCS_COMMAND_ACTIVE(pwrk)) { 1843 mutex_exit(&pwrk->lock); 1844 continue; 1845 } 1846 1847 /* 1848 * No timer active for this command. 
1849 */ 1850 if (pwrk->timer == 0) { 1851 mutex_exit(&pwrk->lock); 1852 continue; 1853 } 1854 1855 /* 1856 * Knock off bits for the time interval. 1857 */ 1858 if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) { 1859 pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL); 1860 } else { 1861 pwrk->timer = 0; 1862 } 1863 if (pwrk->timer > 0) { 1864 mutex_exit(&pwrk->lock); 1865 continue; 1866 } 1867 1868 /* 1869 * The command has now officially timed out. 1870 * Get the path for it. If it doesn't have 1871 * a phy pointer any more, it's really dead 1872 * and can just be put back on the free list. 1873 * There should *not* be any commands associated 1874 * with it any more. 1875 */ 1876 if (pwrk->phy == NULL) { 1877 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1878 "dead command with gone phy being recycled"); 1879 ASSERT(pwrk->xp == NULL); 1880 pmcs_pwork(pwp, pwrk); 1881 continue; 1882 } 1883 amt = sizeof (path); 1884 amt = min(sizeof (pwrk->phy->path), amt); 1885 (void) memcpy(path, pwrk->phy->path, amt); 1886 1887 /* 1888 * If this is a non-SCSA command, stop here. Eventually 1889 * we might do something with non-SCSA commands here- 1890 * but so far their timeout mechanisms are handled in 1891 * the WAIT_FOR macro. 1892 */ 1893 if (pwrk->xp == NULL) { 1894 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1895 "%s: non-SCSA cmd tag 0x%x timed out", 1896 path, pwrk->htag); 1897 mutex_exit(&pwrk->lock); 1898 continue; 1899 } 1900 1901 sp = pwrk->arg; 1902 ASSERT(sp != NULL); 1903 1904 /* 1905 * Mark it as timed out. 
1906 */ 1907 CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT; 1908 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 1909 #ifdef DEBUG 1910 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 1911 "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d", 1912 path, pwrk->htag, pwrk->state, pwrk->onwire); 1913 #else 1914 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 1915 "%s: SCSA cmd tag 0x%x timed out (state %x)", 1916 path, pwrk->htag, pwrk->state); 1917 #endif 1918 /* 1919 * Mark the work structure as timed out. 1920 */ 1921 pwrk->state = PMCS_WORK_STATE_TIMED_OUT; 1922 phyp = pwrk->phy; 1923 target = pwrk->xp; 1924 mutex_exit(&pwrk->lock); 1925 1926 pmcs_lock_phy(phyp); 1927 mutex_enter(&target->statlock); 1928 1929 /* 1930 * No point attempting recovery if the device is gone 1931 */ 1932 if (pwrk->xp->dev_gone) { 1933 mutex_exit(&target->statlock); 1934 pmcs_unlock_phy(phyp); 1935 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 1936 "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE " 1937 "for htag 0x%08x", __func__, 1938 (void *)pwrk->xp, pwrk->htag); 1939 mutex_enter(&pwrk->lock); 1940 if (!PMCS_COMMAND_DONE(pwrk)) { 1941 /* Complete this command here */ 1942 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 1943 "%s: Completing cmd (htag 0x%08x) " 1944 "anyway", __func__, pwrk->htag); 1945 pwrk->dead = 1; 1946 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 1947 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 1948 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 1949 } else { 1950 mutex_exit(&pwrk->lock); 1951 } 1952 continue; 1953 } 1954 1955 /* 1956 * See if we're already waiting for device state recovery 1957 */ 1958 if (target->recover_wait) { 1959 pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, phyp, target, 1960 "%s: Target %p already in recovery", __func__, 1961 (void *)target); 1962 mutex_exit(&target->statlock); 1963 pmcs_unlock_phy(phyp); 1964 continue; 1965 } 1966 1967 pmcs_start_dev_state_recovery(target, phyp); 1968 mutex_exit(&target->statlock); 1969 pmcs_unlock_phy(phyp); 1970 } 1971 /* 1972 * 
Run any completions that may have been queued up. 1973 */ 1974 PMCS_CQ_RUN(pwp); 1975 } 1976 1977 static void 1978 pmcs_watchdog(void *arg) 1979 { 1980 pmcs_hw_t *pwp = arg; 1981 1982 DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t, 1983 pwp->config_changed); 1984 1985 mutex_enter(&pwp->lock); 1986 1987 if (pwp->state != STATE_RUNNING) { 1988 mutex_exit(&pwp->lock); 1989 return; 1990 } 1991 1992 if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) { 1993 if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp, 1994 DDI_NOSLEEP) != DDI_SUCCESS) { 1995 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1996 "Could not dispatch to worker thread"); 1997 } 1998 } 1999 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 2000 drv_usectohz(PMCS_WATCH_INTERVAL)); 2001 mutex_exit(&pwp->lock); 2002 pmcs_check_commands(pwp); 2003 pmcs_handle_dead_phys(pwp); 2004 } 2005 2006 static int 2007 pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt) 2008 { 2009 int i, r, rslt = 0; 2010 for (i = 0; i < icnt; i++) { 2011 r = ddi_intr_remove_handler(pwp->ih_table[i]); 2012 if (r == DDI_SUCCESS) { 2013 continue; 2014 } 2015 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2016 "%s: unable to remove interrupt handler %d", __func__, i); 2017 rslt = -1; 2018 break; 2019 } 2020 return (rslt); 2021 } 2022 2023 static int 2024 pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt) 2025 { 2026 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) { 2027 int r = ddi_intr_block_disable(&pwp->ih_table[0], 2028 pwp->intr_cnt); 2029 if (r != DDI_SUCCESS) { 2030 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2031 "unable to disable interrupt block"); 2032 return (-1); 2033 } 2034 } else { 2035 int i; 2036 for (i = 0; i < icnt; i++) { 2037 if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) { 2038 continue; 2039 } 2040 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2041 "unable to disable interrupt %d", i); 2042 return (-1); 2043 } 2044 } 2045 return (0); 2046 } 2047 2048 static int 2049 pmcs_free_intrs(pmcs_hw_t *pwp, int icnt) 2050 { 2051 int i; 
2052 for (i = 0; i < icnt; i++) { 2053 if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) { 2054 continue; 2055 } 2056 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2057 "unable to free interrupt %d", i); 2058 return (-1); 2059 } 2060 kmem_free(pwp->ih_table, pwp->ih_table_size); 2061 pwp->ih_table_size = 0; 2062 return (0); 2063 } 2064 2065 /* 2066 * Try to set up interrupts of type "type" with a minimum number of interrupts 2067 * of "min". 2068 */ 2069 static void 2070 pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min) 2071 { 2072 int rval, avail, count, actual, max; 2073 2074 rval = ddi_intr_get_nintrs(pwp->dip, type, &count); 2075 if ((rval != DDI_SUCCESS) || (count < min)) { 2076 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2077 "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d", 2078 __func__, type, rval, count, min); 2079 return; 2080 } 2081 2082 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2083 "%s: nintrs = %d for type: %d", __func__, count, type); 2084 2085 rval = ddi_intr_get_navail(pwp->dip, type, &avail); 2086 if ((rval != DDI_SUCCESS) || (avail < min)) { 2087 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2088 "%s: get_navail failed; type: %d rc: %d avail: %d min: %d", 2089 __func__, type, rval, avail, min); 2090 return; 2091 } 2092 2093 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2094 "%s: navail = %d for type: %d", __func__, avail, type); 2095 2096 pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t); 2097 pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP); 2098 2099 switch (type) { 2100 case DDI_INTR_TYPE_MSIX: 2101 pwp->int_type = PMCS_INT_MSIX; 2102 max = PMCS_MAX_MSIX; 2103 break; 2104 case DDI_INTR_TYPE_MSI: 2105 pwp->int_type = PMCS_INT_MSI; 2106 max = PMCS_MAX_MSI; 2107 break; 2108 case DDI_INTR_TYPE_FIXED: 2109 default: 2110 pwp->int_type = PMCS_INT_FIXED; 2111 max = PMCS_MAX_FIXED; 2112 break; 2113 } 2114 2115 rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual, 2116 
DDI_INTR_ALLOC_NORMAL); 2117 if (rval != DDI_SUCCESS) { 2118 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2119 "%s: ddi_intr_alloc failed; type: %d rc: %d", 2120 __func__, type, rval); 2121 kmem_free(pwp->ih_table, pwp->ih_table_size); 2122 pwp->ih_table = NULL; 2123 pwp->ih_table_size = 0; 2124 pwp->intr_cnt = 0; 2125 pwp->int_type = PMCS_INT_NONE; 2126 return; 2127 } 2128 2129 pwp->intr_cnt = actual; 2130 } 2131 2132 /* 2133 * Set up interrupts. 2134 * We return one of three values: 2135 * 2136 * 0 - success 2137 * EAGAIN - failure to set up interrupts 2138 * EIO - "" + we're now stuck partly enabled 2139 * 2140 * If EIO is returned, we can't unload the driver. 2141 */ 2142 static int 2143 pmcs_setup_intr(pmcs_hw_t *pwp) 2144 { 2145 int i, r, itypes, oqv_count; 2146 ddi_intr_handler_t **iv_table; 2147 size_t iv_table_size; 2148 uint_t pri; 2149 2150 if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) { 2151 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2152 "cannot get interrupt types"); 2153 return (EAGAIN); 2154 } 2155 2156 if (disable_msix) { 2157 itypes &= ~DDI_INTR_TYPE_MSIX; 2158 } 2159 if (disable_msi) { 2160 itypes &= ~DDI_INTR_TYPE_MSI; 2161 } 2162 2163 /* 2164 * We won't know what firmware we're running until we call pmcs_setup, 2165 * and we can't call pmcs_setup until we establish interrupts. 2166 */ 2167 2168 pwp->int_type = PMCS_INT_NONE; 2169 2170 /* 2171 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be 2172 * uncivilized. 
2173 */ 2174 if (itypes & DDI_INTR_TYPE_MSIX) { 2175 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX); 2176 if (pwp->int_type == PMCS_INT_MSIX) { 2177 itypes = 0; 2178 } 2179 } 2180 2181 if (itypes & DDI_INTR_TYPE_MSI) { 2182 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1); 2183 if (pwp->int_type == PMCS_INT_MSI) { 2184 itypes = 0; 2185 } 2186 } 2187 2188 if (itypes & DDI_INTR_TYPE_FIXED) { 2189 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1); 2190 if (pwp->int_type == PMCS_INT_FIXED) { 2191 itypes = 0; 2192 } 2193 } 2194 2195 if (pwp->intr_cnt == 0) { 2196 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 2197 "No interrupts available"); 2198 return (EAGAIN); 2199 } 2200 2201 iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt; 2202 iv_table = kmem_alloc(iv_table_size, KM_SLEEP); 2203 2204 /* 2205 * Get iblock cookie and add handlers. 2206 */ 2207 switch (pwp->intr_cnt) { 2208 case 1: 2209 iv_table[0] = pmcs_all_intr; 2210 break; 2211 case 2: 2212 iv_table[0] = pmcs_iodone_ix; 2213 iv_table[1] = pmcs_nonio_ix; 2214 break; 2215 case 4: 2216 iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix; 2217 iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix; 2218 iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix; 2219 iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix; 2220 break; 2221 default: 2222 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2223 "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt); 2224 kmem_free(iv_table, iv_table_size); 2225 return (EAGAIN); 2226 } 2227 2228 for (i = 0; i < pwp->intr_cnt; i++) { 2229 r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i], 2230 (caddr_t)pwp, NULL); 2231 if (r != DDI_SUCCESS) { 2232 kmem_free(iv_table, iv_table_size); 2233 if (pmcs_remove_ihandlers(pwp, i)) { 2234 return (EIO); 2235 } 2236 if (pmcs_free_intrs(pwp, i)) { 2237 return (EIO); 2238 } 2239 pwp->intr_cnt = 0; 2240 return (EAGAIN); 2241 } 2242 } 2243 2244 kmem_free(iv_table, iv_table_size); 2245 2246 if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) { 
/*
 * NOTE(review): tail of pmcs_setup_intr() -- the head of this function
 * (vector allocation, handler registration, and the capability query whose
 * failure path begins here) lies above this fragment.
 */
2247 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2248 "unable to get int capabilities");
/* Unwind handler registration and vector allocation, then let caller retry. */
2249 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2250 return (EIO);
2251 }
2252 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2253 return (EIO);
2254 }
2255 pwp->intr_cnt = 0;
2256 return (EAGAIN);
2257 }
2258
/*
 * Enable the vectors: en bloc if the implementation supports block
 * enable, otherwise one at a time.
 */
2259 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
2260 r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
2261 if (r != DDI_SUCCESS) {
2262 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2263 "intr blk enable failed");
2264 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2265 return (EIO);
2266 }
2267 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2268 return (EIO);
2269 }
2270 pwp->intr_cnt = 0;
/*
 * NOTE(review): this path returns EFAULT while the other unwind paths
 * return EAGAIN -- presumably intentional (block-enable failure is not
 * retryable); confirm against the caller's handling.
 */
2271 return (EFAULT);
2272 }
2273 } else {
2274 for (i = 0; i < pwp->intr_cnt; i++) {
2275 r = ddi_intr_enable(pwp->ih_table[i]);
2276 if (r == DDI_SUCCESS) {
2277 continue;
2278 }
2279 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2280 "unable to enable interrupt %d", i);
/* Only the first i vectors were enabled; disable just those. */
2281 if (pmcs_disable_intrs(pwp, i)) {
2282 return (EIO);
2283 }
2284 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2285 return (EIO);
2286 }
2287 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2288 return (EIO);
2289 }
2290 pwp->intr_cnt = 0;
2291 return (EAGAIN);
2292 }
2293 }
2294
2295 /*
2296 * Set up locks.
2297 */
2298 if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
2299 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2300 "unable to get interrupt priority");
2301 if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
2302 return (EIO);
2303 }
2304 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2305 return (EIO);
2306 }
2307 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2308 return (EIO);
2309 }
2310 pwp->intr_cnt = 0;
2311 return (EAGAIN);
2312 }
2313
/*
 * Initialize all driver mutexes at the interrupt priority just obtained
 * so they can safely be taken from the interrupt handlers below.
 */
2314 pwp->locks_initted = 1;
2315 pwp->intr_pri = pri;
2316 mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2317 mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2318 mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2319 mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2320 mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2321 mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2322 mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2323 mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2324 mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
2325 DDI_INTR_PRI(pri));
2326 #ifdef DEBUG
2327 mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2328 #endif
2329 cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
2330 cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
2331 for (i = 0; i < PMCS_NIQ; i++) {
2332 mutex_init(&pwp->iqp_lock[i], NULL,
2333 MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
2334 }
2335 for (i = 0; i < pwp->cq_info.cq_threads; i++) {
2336 mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
2337 MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
2338 cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
2339 CV_DRIVER, NULL);
2340 }
2341
2342 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured",
2343 pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
2344 ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
2345 pwp->intr_cnt == 1? "t" : "ts");
2346
2347
2348 /*
2349 * Enable Interrupts
2350 */
/*
 * Build the outbound doorbell mask: `pri' is reused as a scratch mask --
 * each XOR clears (unmasks) one bit per outbound queue vector.
 * NOTE(review): when intr_cnt <= PMCS_NOQ this unmasks PMCS_NOQ bits, not
 * intr_cnt bits -- presumably so every outbound queue is serviced by the
 * shared vector(s); confirm against PMCS_NOQ semantics.
 */
2351 if (pwp->intr_cnt > PMCS_NOQ) {
2352 oqv_count = pwp->intr_cnt;
2353 } else {
2354 oqv_count = PMCS_NOQ;
2355 }
2356 for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
2357 pri ^= (1 << i);
2358 }
2359
2360 mutex_enter(&pwp->lock);
2361 pwp->intr_mask = pri;
2362 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
2363 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
2364 mutex_exit(&pwp->lock);
2365
2366 return (0);
2367 }
2368
/*
 * pmcs_teardown_intr
 *
 * Disable all enabled interrupt vectors, remove their handlers, and free
 * them.  Returns 0 on success, EIO if any DDI teardown step fails (in
 * which case intr_cnt is deliberately left non-zero).
 */
2369 static int
2370 pmcs_teardown_intr(pmcs_hw_t *pwp)
2371 {
2372 if (pwp->intr_cnt) {
2373 if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
2374 return (EIO);
2375 }
2376 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2377 return (EIO);
2378 }
2379 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2380 return (EIO);
2381 }
2382 pwp->intr_cnt = 0;
2383 }
2384 return (0);
2385 }
2386
/*
 * Interrupt handler for the General outbound queue vector.  arg1 is the
 * per-HBA softstate registered with ddi_intr_add_handler().
 */
2387 static uint_t
2388 pmcs_general_ix(caddr_t arg1, caddr_t arg2)
2389 {
2390 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2391 _NOTE(ARGUNUSED(arg2));
2392 pmcs_general_intr(pwp);
2393 return (DDI_INTR_CLAIMED);
2394 }
2395
/* Interrupt handler for the Events outbound queue vector. */
2396 static uint_t
2397 pmcs_event_ix(caddr_t arg1, caddr_t arg2)
2398 {
2399 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2400 _NOTE(ARGUNUSED(arg2));
2401 pmcs_event_intr(pwp);
2402 return (DDI_INTR_CLAIMED);
2403 }
2404
/* Interrupt handler for the I/O-completion outbound queue vector. */
2405 static uint_t
2406 pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
2407 {
2408 _NOTE(ARGUNUSED(arg2));
2409 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2410
2411 /*
2412 * It's possible that if we just turned interrupt coalescing off
2413 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
2414 * queue) that there was an interrupt already pending. We use
2415 * io_intr_coal.int_cleared to ensure that we still drop in here and
2416 * clear the appropriate interrupt bit one last time.
2417 */
2418 mutex_enter(&pwp->ict_lock);
2419 if (pwp->io_intr_coal.timer_on ||
2420 (pwp->io_intr_coal.int_cleared == B_FALSE)) {
2421 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2422 (1 << PMCS_OQ_IODONE));
2423 pwp->io_intr_coal.int_cleared = B_TRUE;
2424 }
2425 mutex_exit(&pwp->ict_lock);
2426
2427 pmcs_iodone_intr(pwp);
2428
2429 return (DDI_INTR_CLAIMED);
2430 }
2431
/* Interrupt handler for the fatal-error vector: hand off to the fatal path. */
2432 static uint_t
2433 pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
2434 {
2435 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2436 _NOTE(ARGUNUSED(arg2));
2437 pmcs_fatal_handler(pwp);
2438 return (DDI_INTR_CLAIMED);
2439 }
2440
/*
 * Shared handler for non-I/O interrupt sources.  Reads the outbound
 * doorbell register and dispatches fatal and general-queue events.
 */
2441 static uint_t
2442 pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
2443 {
2444 _NOTE(ARGUNUSED(arg2));
2445 pmcs_hw_t *pwp = (void *)arg1;
2446 uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);
2447
2448 /*
2449 * Check for Fatal Interrupts
2450 */
2451 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
2452 pmcs_fatal_handler(pwp);
2453 return (DDI_INTR_CLAIMED);
2454 }
2455
/*
 * NOTE(review): the general-queue doorbell bit triggers both the general
 * and event service routines here -- presumably the two queues share this
 * vector in this configuration; confirm against the vector setup above.
 */
2456 if (obdb & (1 << PMCS_OQ_GENERAL)) {
2457 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2458 (1 << PMCS_OQ_GENERAL));
2459 pmcs_general_intr(pwp);
2460 pmcs_event_intr(pwp);
2461 }
2462
2463 return (DDI_INTR_CLAIMED);
2464 }
2465
/*
 * Single-vector (INT-X/MSI) handler: reads the outbound doorbell, services
 * every asserted source, and claims the interrupt only if something was
 * handled (or the type is MSI, which cannot be shared).
 */
2466 static uint_t
2467 pmcs_all_intr(caddr_t arg1, caddr_t arg2)
2468 {
2469 _NOTE(ARGUNUSED(arg2));
2470 pmcs_hw_t *pwp = (void *) arg1;
2471 uint32_t obdb;
2472 int handled = 0;
2473
2474 obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);
2475
2476 /*
2477 * Check for Fatal Interrupts
2478 */
2479 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
2480 pmcs_fatal_handler(pwp);
2481 return (DDI_INTR_CLAIMED);
2482 }
2483
2484 /*
2485 * Check for Outbound Queue service needed
2486 */
/* Each serviced source is cleared in hardware and XORed out of obdb. */
2487 if (obdb & (1 << PMCS_OQ_IODONE)) {
2488 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2489 (1 << PMCS_OQ_IODONE));
2490 obdb ^= (1 << PMCS_OQ_IODONE);
2491 handled++;
2492 pmcs_iodone_intr(pwp);
2493 }
2494 if (obdb & (1 << PMCS_OQ_GENERAL)) {
2495 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2496 (1 << PMCS_OQ_GENERAL));
2497 obdb ^= (1 << PMCS_OQ_GENERAL);
2498 handled++;
2499 pmcs_general_intr(pwp);
2500 }
2501 if (obdb & (1 << PMCS_OQ_EVENTS)) {
2502 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2503 (1 << PMCS_OQ_EVENTS));
2504 obdb ^= (1 << PMCS_OQ_EVENTS);
2505 handled++;
2506 pmcs_event_intr(pwp);
2507 }
/* Unexpected doorbell bits: log them, then clear so they don't wedge. */
2508 if (obdb) {
2509 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2510 "interrupt bits not handled (0x%x)", obdb);
2511 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
2512 handled++;
2513 }
2514 if (pwp->int_type == PMCS_INT_MSI) {
2515 handled++;
2516 }
2517 return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2518 }
2519
/*
 * Fatal firmware/hardware error path: mark the HBA dead, dump registers,
 * mask and clear all outbound interrupts, and report the fault to FMA.
 * Panics in DEBUG builds so the failure is captured in a dump.
 */
2520 void
2521 pmcs_fatal_handler(pmcs_hw_t *pwp)
2522 {
2523 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught");
2524 mutex_enter(&pwp->lock);
2525 pwp->state = STATE_DEAD;
2526 pmcs_register_dump_int(pwp);
2527 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
2528 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
2529 mutex_exit(&pwp->lock);
2530 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
2531 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
2532
2533 #ifdef DEBUG
2534 cmn_err(CE_PANIC, "PMCS Fatal Firmware Error");
2535 #endif
2536 }
2537
2538 /*
2539 * Called with PHY lock and target statlock held and scratch acquired.
2540 */ 2541 boolean_t 2542 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 2543 { 2544 pmcs_phy_t *pptr = tgt->phy; 2545 2546 switch (pptr->dtype) { 2547 case SAS: 2548 case EXPANDER: 2549 break; 2550 case SATA: 2551 tgt->ca = 1; 2552 break; 2553 default: 2554 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2555 "%s: Target %p has PHY %p with invalid dtype", 2556 __func__, (void *)tgt, (void *)pptr); 2557 return (B_FALSE); 2558 } 2559 2560 tgt->new = 1; 2561 tgt->dev_gone = 0; 2562 tgt->recover_wait = 0; 2563 2564 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2565 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__, 2566 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2567 2568 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) { 2569 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2570 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__, 2571 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2572 mutex_destroy(&tgt->statlock); 2573 mutex_destroy(&tgt->wqlock); 2574 mutex_destroy(&tgt->aqlock); 2575 return (B_FALSE); 2576 } 2577 2578 return (B_TRUE); 2579 } 2580 2581 /* 2582 * Called with softstate lock held 2583 */ 2584 void 2585 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2586 { 2587 pmcs_xscsi_t *xp; 2588 unsigned int vtgt; 2589 2590 ASSERT(mutex_owned(&pwp->lock)); 2591 2592 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) { 2593 xp = pwp->targets[vtgt]; 2594 if (xp == NULL) { 2595 continue; 2596 } 2597 2598 mutex_enter(&xp->statlock); 2599 if (xp->phy == pptr) { 2600 if (xp->new) { 2601 xp->new = 0; 2602 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2603 "cancel config of vtgt %u", vtgt); 2604 } else { 2605 pmcs_clear_xp(pwp, xp); 2606 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2607 "Removed tgt 0x%p vtgt %u", 2608 (void *)xp, vtgt); 2609 } 2610 mutex_exit(&xp->statlock); 2611 break; 2612 } 2613 mutex_exit(&xp->statlock); 2614 } 2615 } 2616 2617 void 2618 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, 2619 pmcs_phy_t 
*phyp, pmcs_xscsi_t *target, const char *fmt, ...) 2620 { 2621 va_list ap; 2622 int written = 0; 2623 char *ptr; 2624 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2625 boolean_t system_log; 2626 int system_log_level; 2627 2628 switch (level) { 2629 case PMCS_PRT_DEBUG_DEVEL: 2630 case PMCS_PRT_DEBUG_DEV_STATE: 2631 case PMCS_PRT_DEBUG_PHY_LOCKING: 2632 case PMCS_PRT_DEBUG_SCSI_STATUS: 2633 case PMCS_PRT_DEBUG_UNDERFLOW: 2634 case PMCS_PRT_DEBUG_CONFIG: 2635 case PMCS_PRT_DEBUG_IPORT: 2636 case PMCS_PRT_DEBUG_MAP: 2637 case PMCS_PRT_DEBUG3: 2638 case PMCS_PRT_DEBUG2: 2639 case PMCS_PRT_DEBUG1: 2640 case PMCS_PRT_DEBUG: 2641 system_log = B_FALSE; 2642 break; 2643 case PMCS_PRT_INFO: 2644 system_log = B_TRUE; 2645 system_log_level = CE_CONT; 2646 break; 2647 case PMCS_PRT_WARN: 2648 system_log = B_TRUE; 2649 system_log_level = CE_NOTE; 2650 break; 2651 case PMCS_PRT_ERR: 2652 system_log = B_TRUE; 2653 system_log_level = CE_WARN; 2654 break; 2655 default: 2656 return; 2657 } 2658 2659 mutex_enter(&pmcs_trace_lock); 2660 gethrestime(&pmcs_tbuf_ptr->timestamp); 2661 ptr = pmcs_tbuf_ptr->buf; 2662 2663 /* 2664 * Store the pertinent PHY and target information if there is any 2665 */ 2666 if (target == NULL) { 2667 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM; 2668 pmcs_tbuf_ptr->target_ua[0] = '\0'; 2669 } else { 2670 pmcs_tbuf_ptr->target_num = target->target_num; 2671 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua, 2672 PMCS_TBUF_UA_MAX_SIZE); 2673 } 2674 2675 if (phyp == NULL) { 2676 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8); 2677 pmcs_tbuf_ptr->phy_path[0] = '\0'; 2678 pmcs_tbuf_ptr->phy_dtype = NOTHING; 2679 } else { 2680 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address, 2681 phyp->sas_address, 8); 2682 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32); 2683 pmcs_tbuf_ptr->phy_dtype = phyp->dtype; 2684 } 2685 2686 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2687 ddi_get_instance(pwp->dip), level); 2688 ptr += strlen(ptr); 2689 
va_start(ap, fmt); 2690 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2691 va_end(ap); 2692 if (written > elem_size - 1) { 2693 /* Indicate truncation */ 2694 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2695 } 2696 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2697 pmcs_tbuf_ptr = pmcs_tbuf; 2698 pmcs_tbuf_wrap = B_TRUE; 2699 pmcs_tbuf_idx = 0; 2700 } else { 2701 ++pmcs_tbuf_ptr; 2702 } 2703 mutex_exit(&pmcs_trace_lock); 2704 2705 /* 2706 * When pmcs_force_syslog in non-zero, everything goes also 2707 * to syslog, at CE_CONT level. 2708 */ 2709 if (pmcs_force_syslog) { 2710 system_log = B_TRUE; 2711 system_log_level = CE_CONT; 2712 } 2713 2714 /* 2715 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2716 * goes to syslog. 2717 */ 2718 if (system_log) { 2719 char local[196]; 2720 2721 switch (system_log_level) { 2722 case CE_CONT: 2723 (void) snprintf(local, sizeof (local), "%sINFO: ", 2724 pmcs_console ? "" : "?"); 2725 break; 2726 case CE_NOTE: 2727 case CE_WARN: 2728 local[0] = 0; 2729 break; 2730 default: 2731 return; 2732 } 2733 2734 ptr = local; 2735 ptr += strlen(local); 2736 (void) snprintf(ptr, (sizeof (local)) - 2737 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2738 ddi_get_instance(pwp->dip)); 2739 ptr += strlen(ptr); 2740 va_start(ap, fmt); 2741 (void) vsnprintf(ptr, 2742 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2743 va_end(ap); 2744 if (level == CE_CONT) { 2745 (void) strlcat(local, "\n", sizeof (local)); 2746 } 2747 cmn_err(system_log_level, local); 2748 } 2749 2750 } 2751 2752 /* 2753 * pmcs_acquire_scratch 2754 * 2755 * If "wait" is true, the caller will wait until it can acquire the scratch. 2756 * This implies the caller needs to be in a context where spinning for an 2757 * indeterminate amount of time is acceptable. 
2758 */
/*
 * Try-lock or spin-acquire the shared scratch area.  Returns the previous
 * lock value: 0 means the scratch was acquired (always 0 in wait mode).
 */
2759 int
2760 pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
2761 {
2762 int rval;
2763
2764 if (!wait) {
2765 return (atomic_swap_8(&pwp->scratch_locked, 1));
2766 }
2767
2768 /*
2769 * Caller will wait for scratch.
2770 */
2771 while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
2772 drv_usecwait(100);
2773 }
2774
2775 return (rval);
2776 }
2777
/* Release the scratch area acquired with pmcs_acquire_scratch(). */
2778 void
2779 pmcs_release_scratch(pmcs_hw_t *pwp)
2780 {
2781 pwp->scratch_locked = 0;
2782 }
2783
/*
 * Create and install a named SAS-PHY kstat for every PHY on the iport
 * that does not already have one.  Called with no locks held; takes the
 * iport lock and each PHY lock in turn.
 */
2784 static void
2785 pmcs_create_phy_stats(pmcs_iport_t *iport)
2786 {
2787 sas_phy_stats_t *ps;
2788 pmcs_hw_t *pwp;
2789 pmcs_phy_t *phyp;
2790 int ndata;
2791 char ks_name[KSTAT_STRLEN];
2792
2793 ASSERT(iport != NULL);
2794 pwp = iport->pwp;
2795 ASSERT(pwp != NULL);
2796
2797 mutex_enter(&iport->lock);
2798
2799 for (phyp = list_head(&iport->phys);
2800 phyp != NULL;
2801 phyp = list_next(&iport->phys, phyp)) {
2802
2803 pmcs_lock_phy(phyp);
2804
2805 if (phyp->phy_stats != NULL) {
2806 pmcs_unlock_phy(phyp);
2807 /* We've already created this kstat instance */
2808 continue;
2809 }
2810
2811 ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));
2812
/* kstat name: <driver>.<hba wwn>.<iport instance>.<phy number> */
2813 (void) snprintf(ks_name, sizeof (ks_name),
2814 "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
2815 (longlong_t)pwp->sas_wwns[0],
2816 ddi_get_instance(iport->dip), phyp->phynum);
2817
2818 phyp->phy_stats = kstat_create("pmcs",
2819 ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
2820 KSTAT_TYPE_NAMED, ndata, 0);
2821
/* Creation failure is non-fatal; log and move to the next PHY. */
2822 if (phyp->phy_stats == NULL) {
2823 pmcs_unlock_phy(phyp);
2824 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
2825 "%s: Failed to create %s kstats", __func__,
2826 ks_name);
2827 continue;
2828 }
2829
2830 ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;
2831
2832 kstat_named_init(&ps->seconds_since_last_reset,
2833 "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
2834 kstat_named_init(&ps->tx_frames,
2835 "TxFrames", KSTAT_DATA_ULONGLONG);
2836 kstat_named_init(&ps->rx_frames,
2837 "RxFrames", KSTAT_DATA_ULONGLONG);
2838 kstat_named_init(&ps->tx_words,
2839 "TxWords", KSTAT_DATA_ULONGLONG);
2840 kstat_named_init(&ps->rx_words,
2841 "RxWords", KSTAT_DATA_ULONGLONG);
2842 kstat_named_init(&ps->invalid_dword_count,
2843 "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
2844 kstat_named_init(&ps->running_disparity_error_count,
2845 "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
2846 kstat_named_init(&ps->loss_of_dword_sync_count,
2847 "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
2848 kstat_named_init(&ps->phy_reset_problem_count,
2849 "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);
2850
/* Refreshed on demand through pmcs_update_phy_stats() below. */
2851 phyp->phy_stats->ks_private = phyp;
2852 phyp->phy_stats->ks_update = pmcs_update_phy_stats;
2853 kstat_install(phyp->phy_stats);
2854 pmcs_unlock_phy(phyp);
2855 }
2856
2857 mutex_exit(&iport->lock);
2858 }
2859
/*
 * kstat ks_update callback: refresh the error counters for a single PHY
 * from the chip's diagnostic registers.  Returns DDI_SUCCESS, or
 * DDI_FAILURE if any chip read fails.
 */
2860 int
2861 pmcs_update_phy_stats(kstat_t *ks, int rw)
2862 {
2863 int val, ret = DDI_FAILURE;
2864 pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private;
2865 pmcs_hw_t *pwp = pptr->pwp;
2866 sas_phy_stats_t *ps = ks->ks_data;
2867
2868 _NOTE(ARGUNUSED(rw));
2869 ASSERT((pptr != NULL) && (pwp != NULL));
2870
2871 /*
2872 * We just want to lock against other invocations of kstat;
2873 * we don't need to pmcs_lock_phy() for this.
2874 */
2875 mutex_enter(&pptr->phy_lock);
2876
2877 /* Get Stats from Chip */
2878 val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
2879 if (val == DDI_FAILURE)
2880 goto fail;
2881 ps->invalid_dword_count.value.ull = (unsigned long long)val;
2882
2883 val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
2884 if (val == DDI_FAILURE)
2885 goto fail;
2886 ps->running_disparity_error_count.value.ull = (unsigned long long)val;
2887
2888 val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
2889 if (val == DDI_FAILURE)
2890 goto fail;
2891 ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;
2892
2893 val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
2894 if (val == DDI_FAILURE)
2895 goto fail;
2896 ps->phy_reset_problem_count.value.ull = (unsigned long long)val;
2897
2898 ret = DDI_SUCCESS;
2899 fail:
2900 mutex_exit(&pptr->phy_lock);
2901 return (ret);
2902 }
2903
/*
 * Delete the phy kstat(s) for this iport.
 * NOTE(review): only iport->pptr's kstat is deleted here, whereas
 * pmcs_create_phy_stats() walks the whole iport->phys list -- presumably
 * the list entries are reached elsewhere during teardown; confirm.
 */
2904 static void
2905 pmcs_destroy_phy_stats(pmcs_iport_t *iport)
2906 {
2907 pmcs_phy_t *phyp;
2908
2909 ASSERT(iport != NULL);
2910 mutex_enter(&iport->lock);
2911 phyp = iport->pptr;
2912 if (phyp == NULL) {
2913 mutex_exit(&iport->lock);
2914 return;
2915 }
2916
2917 pmcs_lock_phy(phyp);
2918 if (phyp->phy_stats != NULL) {
2919 kstat_delete(phyp->phy_stats);
2920 phyp->phy_stats = NULL;
2921 }
2922 pmcs_unlock_phy(phyp);
2923
2924 mutex_exit(&iport->lock);
2925 }
2926
/*
 * FMA error callback: post the PCI ereport and report the framework's
 * assessment back to the fault manager.
 */
2927 /*ARGSUSED*/
2928 static int
2929 pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
2930 {
2931 /*
2932 * as the driver can always deal with an error in any dma or
2933 * access handle, we can just return the fme_status value.
2934 */
2935 pci_ereport_post(dip, err, NULL);
2936 return (err->fme_status);
2937 }
2938
/*
 * Register this instance with IO Fault Services according to the
 * capability mask in pwp->fm_capabilities, flagging all register-access
 * and DMA attributes for FMA error reporting first.
 */
2939 static void
2940 pmcs_fm_init(pmcs_hw_t *pwp)
2941 {
2942 ddi_iblock_cookie_t fm_ibc;
2943
2944 /* Only register with IO Fault Services if we have some capability */
2945 if (pwp->fm_capabilities) {
2946 /* Adjust access and dma attributes for FMA */
2947 pwp->reg_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
2948 pwp->dev_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
2949 pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2950 pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2951 pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2952 pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2953
2954 /*
2955 * Register capabilities with IO Fault Services.
2956 */
2957 ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);
2958
2959 /*
2960 * Initialize pci ereport capabilities if ereport
2961 * capable (should always be.)
2962 */
2963 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
2964 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2965 pci_ereport_setup(pwp->dip);
2966 }
2967
2968 /*
2969 * Register error callback if error callback capable.
2970 */
2971 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2972 ddi_fm_handler_register(pwp->dip,
2973 pmcs_fm_error_cb, (void *) pwp);
2974 }
2975 }
2976 }
2977
/*
 * Undo pmcs_fm_init() in reverse order: unregister the callback, tear
 * down pci ereports, detach from IO Fault Services, and clear the FMA
 * flags from the access/DMA attributes.
 */
2978 static void
2979 pmcs_fm_fini(pmcs_hw_t *pwp)
2980 {
2981 /* Only unregister FMA capabilities if registered */
2982 if (pwp->fm_capabilities) {
2983 /*
2984 * Un-register error callback if error callback capable.
2985 */
2986 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2987 ddi_fm_handler_unregister(pwp->dip);
2988 }
2989
2990 /*
2991 * Release any resources allocated by pci_ereport_setup()
2992 */
2993 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
2994 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2995 pci_ereport_teardown(pwp->dip);
2996 }
2997
2998 /* Unregister from IO Fault Services */
2999 ddi_fm_fini(pwp->dip);
3000
3001 /* Adjust access and dma attributes for FMA */
3002 pwp->reg_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
3003 pwp->dev_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
3004 pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3005 pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3006 pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3007 pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3008 }
3009 }
3010
/*
 * Fabricate per-port WWNs when the HBA supplies none: seed from the
 * system serial number (or, failing that, a build-time string), then
 * pack in the driver instance and a NAA-5 prefix.
 * NOTE(review): the serial value is shifted left 8 bits before the
 * instance is OR'ed in at bit 52 -- a large serial could collide with
 * the instance/NAA fields; confirm the intended bit layout.
 */
3011 static boolean_t
3012 pmcs_fabricate_wwid(pmcs_hw_t *pwp)
3013 {
3014 char *cp, c;
3015 uint64_t adr;
3016 int i;
3017
3018 cp = &c;
3019 (void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);
3020 if (adr == 0) {
3021 static const char foo[] = __DATE__ __TIME__;
3022 /* Oh, dear, we're toast */
3023 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
3024 "%s: No serial number available to fabricate WWN",
3025 __func__);
/* Fall back to summing the compile timestamp's bytes as a seed. */
3026 for (i = 0; foo[i]; i++) {
3027 adr += foo[i];
3028 }
3029 }
3030 adr <<= 8;
3031 adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
3032 adr |= (5ULL << 60);
/* One consecutive WWN per potential port. */
3033 for (i = 0; i < PMCS_MAX_PORTS; i++) {
3034 pwp->sas_wwns[i] = adr + i;
3035 }
3036
3037 return (B_TRUE);
3038 }
3039