1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 #include <sys/scsi/adapters/pmcs/pmcs.h> 26 27 #define PMCS_DRIVER_VERSION "pmcs HBA device driver" 28 29 static char *pmcs_driver_rev = PMCS_DRIVER_VERSION; 30 31 /* 32 * Non-DDI Compliant stuff 33 */ 34 extern char hw_serial[]; 35 36 /* 37 * Global driver data 38 */ 39 void *pmcs_softc_state = NULL; 40 void *pmcs_iport_softstate = NULL; 41 42 /* 43 * Tracing and Logging info 44 */ 45 pmcs_tbuf_t *pmcs_tbuf = NULL; 46 uint32_t pmcs_tbuf_num_elems = 0; 47 pmcs_tbuf_t *pmcs_tbuf_ptr; 48 uint32_t pmcs_tbuf_idx = 0; 49 boolean_t pmcs_tbuf_wrap = B_FALSE; 50 static kmutex_t pmcs_trace_lock; 51 52 /* 53 * If pmcs_force_syslog value is non-zero, all messages put in the trace log 54 * will also be sent to system log. 
55 */ 56 int pmcs_force_syslog = 0; 57 int pmcs_console = 0; 58 59 /* 60 * External References 61 */ 62 extern int ncpus_online; 63 64 /* 65 * Local static data 66 */ 67 static int fwlog_level = 3; 68 static int physpeed = PHY_LINK_ALL; 69 static int phymode = PHY_LM_AUTO; 70 static int block_mask = 0; 71 static int phymap_usec = 3 * MICROSEC; 72 static int iportmap_usec = 2 * MICROSEC; 73 74 #ifdef DEBUG 75 static int debug_mask = 1; 76 #else 77 static int debug_mask = 0; 78 #endif 79 80 #ifdef DISABLE_MSIX 81 static int disable_msix = 1; 82 #else 83 static int disable_msix = 0; 84 #endif 85 86 #ifdef DISABLE_MSI 87 static int disable_msi = 1; 88 #else 89 static int disable_msi = 0; 90 #endif 91 92 static uint16_t maxqdepth = 0xfffe; 93 94 /* 95 * Local prototypes 96 */ 97 static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t); 98 static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t); 99 static int pmcs_unattach(pmcs_hw_t *); 100 static int pmcs_iport_unattach(pmcs_iport_t *); 101 static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long); 102 static void pmcs_watchdog(void *); 103 static int pmcs_setup_intr(pmcs_hw_t *); 104 static int pmcs_teardown_intr(pmcs_hw_t *); 105 106 static uint_t pmcs_nonio_ix(caddr_t, caddr_t); 107 static uint_t pmcs_general_ix(caddr_t, caddr_t); 108 static uint_t pmcs_event_ix(caddr_t, caddr_t); 109 static uint_t pmcs_iodone_ix(caddr_t, caddr_t); 110 static uint_t pmcs_fatal_ix(caddr_t, caddr_t); 111 static uint_t pmcs_all_intr(caddr_t, caddr_t); 112 static int pmcs_quiesce(dev_info_t *dip); 113 static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *); 114 115 static void pmcs_create_phy_stats(pmcs_iport_t *); 116 int pmcs_update_phy_stats(kstat_t *, int); 117 static void pmcs_destroy_phy_stats(pmcs_iport_t *); 118 119 static void pmcs_fm_fini(pmcs_hw_t *pwp); 120 static void pmcs_fm_init(pmcs_hw_t *pwp); 121 static int pmcs_fm_error_cb(dev_info_t *dip, 122 ddi_fm_error_t *err, const void *impl_data); 123 124 /* 125 * Local configuration 
data 126 */ 127 static struct dev_ops pmcs_ops = { 128 DEVO_REV, /* devo_rev, */ 129 0, /* refcnt */ 130 ddi_no_info, /* info */ 131 nulldev, /* identify */ 132 nulldev, /* probe */ 133 pmcs_attach, /* attach */ 134 pmcs_detach, /* detach */ 135 nodev, /* reset */ 136 NULL, /* driver operations */ 137 NULL, /* bus operations */ 138 ddi_power, /* power management */ 139 pmcs_quiesce /* quiesce */ 140 }; 141 142 static struct modldrv modldrv = { 143 &mod_driverops, 144 PMCS_DRIVER_VERSION, 145 &pmcs_ops, /* driver ops */ 146 }; 147 static struct modlinkage modlinkage = { 148 MODREV_1, &modldrv, NULL 149 }; 150 151 const ddi_dma_attr_t pmcs_dattr = { 152 DMA_ATTR_V0, /* dma_attr version */ 153 0x0000000000000000ull, /* dma_attr_addr_lo */ 154 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 155 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 156 0x0000000000000001ull, /* dma_attr_align */ 157 0x00000078, /* dma_attr_burstsizes */ 158 0x00000001, /* dma_attr_minxfer */ 159 0x00000000FFFFFFFFull, /* dma_attr_maxxfer */ 160 0x00000000FFFFFFFFull, /* dma_attr_seg */ 161 1, /* dma_attr_sgllen */ 162 512, /* dma_attr_granular */ 163 0 /* dma_attr_flags */ 164 }; 165 166 static ddi_device_acc_attr_t rattr = { 167 DDI_DEVICE_ATTR_V1, 168 DDI_STRUCTURE_LE_ACC, 169 DDI_STRICTORDER_ACC, 170 DDI_DEFAULT_ACC 171 }; 172 173 174 /* 175 * Attach/Detach functions 176 */ 177 178 int 179 _init(void) 180 { 181 int ret; 182 183 ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1); 184 if (ret != 0) { 185 cmn_err(CE_WARN, "?soft state init failed for pmcs"); 186 return (ret); 187 } 188 189 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 190 cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs"); 191 ddi_soft_state_fini(&pmcs_softc_state); 192 return (ret); 193 } 194 195 /* 196 * Allocate soft state for iports 197 */ 198 ret = ddi_soft_state_init(&pmcs_iport_softstate, 199 sizeof (pmcs_iport_t), 2); 200 if (ret != 0) { 201 cmn_err(CE_WARN, "?iport soft state init failed for pmcs"); 
202 ddi_soft_state_fini(&pmcs_softc_state); 203 return (ret); 204 } 205 206 ret = mod_install(&modlinkage); 207 if (ret != 0) { 208 cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret); 209 scsi_hba_fini(&modlinkage); 210 ddi_soft_state_fini(&pmcs_iport_softstate); 211 ddi_soft_state_fini(&pmcs_softc_state); 212 return (ret); 213 } 214 215 /* Initialize the global trace lock */ 216 mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL); 217 218 return (0); 219 } 220 221 int 222 _fini(void) 223 { 224 int ret; 225 if ((ret = mod_remove(&modlinkage)) != 0) { 226 return (ret); 227 } 228 scsi_hba_fini(&modlinkage); 229 230 /* Free pmcs log buffer and destroy the global lock */ 231 if (pmcs_tbuf) { 232 kmem_free(pmcs_tbuf, 233 pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t)); 234 pmcs_tbuf = NULL; 235 } 236 mutex_destroy(&pmcs_trace_lock); 237 238 ddi_soft_state_fini(&pmcs_iport_softstate); 239 ddi_soft_state_fini(&pmcs_softc_state); 240 return (0); 241 } 242 243 int 244 _info(struct modinfo *modinfop) 245 { 246 return (mod_info(&modlinkage, modinfop)); 247 } 248 249 static int 250 pmcs_iport_attach(dev_info_t *dip) 251 { 252 pmcs_iport_t *iport; 253 pmcs_hw_t *pwp; 254 scsi_hba_tran_t *tran; 255 void *ua_priv = NULL; 256 char *iport_ua; 257 char *init_port; 258 int hba_inst; 259 int inst; 260 261 hba_inst = ddi_get_instance(ddi_get_parent(dip)); 262 inst = ddi_get_instance(dip); 263 264 pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst); 265 if (pwp == NULL) { 266 cmn_err(CE_WARN, "%s: No HBA softstate for instance %d", 267 __func__, inst); 268 return (DDI_FAILURE); 269 } 270 271 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) { 272 return (DDI_FAILURE); 273 } 274 275 if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) { 276 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 277 "%s: invoked with NULL unit address, inst (%d)", 278 __func__, inst); 279 return (DDI_FAILURE); 280 } 281 282 if (ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != 
DDI_SUCCESS) { 283 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 284 "Failed to alloc soft state for iport %d", inst); 285 return (DDI_FAILURE); 286 } 287 288 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 289 if (iport == NULL) { 290 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 291 "cannot get iport soft state"); 292 goto iport_attach_fail1; 293 } 294 295 mutex_init(&iport->lock, NULL, MUTEX_DRIVER, 296 DDI_INTR_PRI(pwp->intr_pri)); 297 cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL); 298 cv_init(&iport->smp_cv, NULL, CV_DEFAULT, NULL); 299 mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER, 300 DDI_INTR_PRI(pwp->intr_pri)); 301 mutex_init(&iport->smp_lock, NULL, MUTEX_DRIVER, 302 DDI_INTR_PRI(pwp->intr_pri)); 303 304 /* Set some data on the iport handle */ 305 iport->dip = dip; 306 iport->pwp = pwp; 307 308 /* Dup the UA into the iport handle */ 309 iport->ua = strdup(iport_ua); 310 311 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 312 tran->tran_hba_private = iport; 313 314 list_create(&iport->phys, sizeof (pmcs_phy_t), 315 offsetof(pmcs_phy_t, list_node)); 316 317 /* 318 * If our unit address is active in the phymap, configure our 319 * iport's phylist. 
320 */ 321 mutex_enter(&iport->lock); 322 ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua); 323 if (ua_priv) { 324 /* Non-NULL private data indicates the unit address is active */ 325 iport->ua_state = UA_ACTIVE; 326 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 327 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 328 "%s: failed to " 329 "configure phys on iport handle (0x%p), " 330 " unit address [%s]", __func__, 331 (void *)iport, iport_ua); 332 mutex_exit(&iport->lock); 333 goto iport_attach_fail2; 334 } 335 } else { 336 iport->ua_state = UA_INACTIVE; 337 } 338 mutex_exit(&iport->lock); 339 340 /* Allocate string-based soft state pool for targets */ 341 iport->tgt_sstate = NULL; 342 if (ddi_soft_state_bystr_init(&iport->tgt_sstate, 343 sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) { 344 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 345 "cannot get iport tgt soft state"); 346 goto iport_attach_fail2; 347 } 348 349 /* Create this iport's target map */ 350 if (pmcs_iport_tgtmap_create(iport) == B_FALSE) { 351 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 352 "Failed to create tgtmap on iport %d", inst); 353 goto iport_attach_fail3; 354 } 355 356 /* Set up the 'initiator-port' DDI property on this iport */ 357 init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP); 358 if (pwp->separate_ports) { 359 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 360 "%s: separate ports not supported", __func__); 361 } else { 362 /* Set initiator-port value to the HBA's base WWN */ 363 (void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1, 364 init_port); 365 } 366 367 mutex_enter(&iport->lock); 368 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING, 369 SCSI_ADDR_PROP_INITIATOR_PORT, init_port); 370 kmem_free(init_port, PMCS_MAX_UA_SIZE); 371 372 /* Set up a 'num-phys' DDI property for the iport node */ 373 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 374 &iport->nphy); 375 mutex_exit(&iport->lock); 376 377 /* Create kstats for each of the phys in this port */ 
378 pmcs_create_phy_stats(iport); 379 380 /* 381 * Insert this iport handle into our list and set 382 * iports_attached on the HBA node. 383 */ 384 rw_enter(&pwp->iports_lock, RW_WRITER); 385 ASSERT(!list_link_active(&iport->list_node)); 386 list_insert_tail(&pwp->iports, iport); 387 pwp->iports_attached = 1; 388 pwp->num_iports++; 389 rw_exit(&pwp->iports_lock); 390 391 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 392 "iport%d attached", inst); 393 ddi_report_dev(dip); 394 return (DDI_SUCCESS); 395 396 /* teardown and fail */ 397 iport_attach_fail3: 398 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 399 iport_attach_fail2: 400 list_destroy(&iport->phys); 401 strfree(iport->ua); 402 mutex_destroy(&iport->refcnt_lock); 403 mutex_destroy(&iport->smp_lock); 404 cv_destroy(&iport->refcnt_cv); 405 cv_destroy(&iport->smp_cv); 406 mutex_destroy(&iport->lock); 407 iport_attach_fail1: 408 ddi_soft_state_free(pmcs_iport_softstate, inst); 409 return (DDI_FAILURE); 410 } 411 412 static int 413 pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 414 { 415 scsi_hba_tran_t *tran; 416 char chiprev, *fwsupport, hw_rev[24], fw_rev[24]; 417 off_t set3size; 418 int inst, i; 419 int sm_hba = 1; 420 int protocol = 0; 421 int num_phys = 0; 422 pmcs_hw_t *pwp; 423 pmcs_phy_t *phyp; 424 uint32_t num_threads; 425 char buf[64]; 426 char *fwl_file; 427 428 switch (cmd) { 429 case DDI_ATTACH: 430 break; 431 432 case DDI_PM_RESUME: 433 case DDI_RESUME: 434 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 435 if (!tran) { 436 return (DDI_FAILURE); 437 } 438 /* No DDI_?_RESUME on iport nodes */ 439 if (scsi_hba_iport_unit_address(dip) != NULL) { 440 return (DDI_SUCCESS); 441 } 442 pwp = TRAN2PMC(tran); 443 if (pwp == NULL) { 444 return (DDI_FAILURE); 445 } 446 447 mutex_enter(&pwp->lock); 448 pwp->suspended = 0; 449 if (pwp->tq) { 450 ddi_taskq_resume(pwp->tq); 451 } 452 mutex_exit(&pwp->lock); 453 return (DDI_SUCCESS); 454 455 default: 456 return (DDI_FAILURE); 457 } 458 459 /* 460 * If 
this is an iport node, invoke iport attach. 461 */ 462 if (scsi_hba_iport_unit_address(dip) != NULL) { 463 return (pmcs_iport_attach(dip)); 464 } 465 466 /* 467 * From here on is attach for the HBA node 468 */ 469 470 #ifdef DEBUG 471 /* 472 * Check to see if this unit is to be disabled. We can't disable 473 * on a per-iport node. It's either the entire HBA or nothing. 474 */ 475 (void) snprintf(buf, sizeof (buf), 476 "disable-instance-%d", ddi_get_instance(dip)); 477 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 478 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) { 479 cmn_err(CE_NOTE, "pmcs%d: disabled by configuration", 480 ddi_get_instance(dip)); 481 return (DDI_FAILURE); 482 } 483 #endif 484 485 /* 486 * Allocate softstate 487 */ 488 inst = ddi_get_instance(dip); 489 if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) { 490 cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst); 491 return (DDI_FAILURE); 492 } 493 494 pwp = ddi_get_soft_state(pmcs_softc_state, inst); 495 if (pwp == NULL) { 496 cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst); 497 ddi_soft_state_free(pmcs_softc_state, inst); 498 return (DDI_FAILURE); 499 } 500 pwp->dip = dip; 501 STAILQ_INIT(&pwp->dq); 502 STAILQ_INIT(&pwp->cq); 503 STAILQ_INIT(&pwp->wf); 504 STAILQ_INIT(&pwp->pf); 505 /* 506 * Create the list for iports 507 */ 508 list_create(&pwp->iports, sizeof (pmcs_iport_t), 509 offsetof(pmcs_iport_t, list_node)); 510 511 pwp->state = STATE_PROBING; 512 513 /* 514 * Get driver.conf properties 515 */ 516 pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 517 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask", 518 debug_mask); 519 pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 520 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask", 521 block_mask); 522 pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 523 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed); 524 pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 525 
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode); 526 pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 527 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level); 528 if (pwp->fwlog > PMCS_FWLOG_MAX) { 529 pwp->fwlog = PMCS_FWLOG_MAX; 530 } 531 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "pmcs-fwlogfile", 532 &fwl_file) == DDI_SUCCESS)) { 533 if (snprintf(pwp->fwlogfile_aap1, MAXPATHLEN, "%s%d-aap1.0", 534 fwl_file, ddi_get_instance(dip)) > MAXPATHLEN) { 535 pwp->fwlogfile_aap1[0] = '\0'; 536 pwp->fwlogfile_iop[0] = '\0'; 537 } else if (snprintf(pwp->fwlogfile_iop, MAXPATHLEN, 538 "%s%d-iop.0", fwl_file, 539 ddi_get_instance(dip)) > MAXPATHLEN) { 540 pwp->fwlogfile_aap1[0] = '\0'; 541 pwp->fwlogfile_iop[0] = '\0'; 542 } 543 ddi_prop_free(fwl_file); 544 } else { 545 pwp->fwlogfile_aap1[0] = '\0'; 546 pwp->fwlogfile_iop[0] = '\0'; 547 } 548 549 mutex_enter(&pmcs_trace_lock); 550 if (pmcs_tbuf == NULL) { 551 /* Allocate trace buffer */ 552 pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 553 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems", 554 PMCS_TBUF_NUM_ELEMS_DEF); 555 if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) || 556 (pmcs_tbuf_num_elems == 0)) { 557 pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF; 558 } 559 560 pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems * 561 sizeof (pmcs_tbuf_t), KM_SLEEP); 562 pmcs_tbuf_ptr = pmcs_tbuf; 563 pmcs_tbuf_idx = 0; 564 } 565 mutex_exit(&pmcs_trace_lock); 566 567 if (pwp->fwlog && strlen(pwp->fwlogfile_aap1) > 0) { 568 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 569 "%s: firmware event log files: %s, %s", __func__, 570 pwp->fwlogfile_aap1, pwp->fwlogfile_iop); 571 pwp->fwlog_file = 1; 572 } else { 573 if (pwp->fwlog == 0) { 574 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 575 "%s: No firmware event log will be written " 576 "(event log disabled)", __func__); 577 } else { 578 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 579 "%s: No firmware event log will be written " 580 "(no filename 
configured - too long?)", __func__); 581 } 582 pwp->fwlog_file = 0; 583 } 584 585 disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 586 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix", 587 disable_msix); 588 disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 589 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi", 590 disable_msi); 591 maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 592 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth); 593 pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 594 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0); 595 if (pwp->fw_force_update == 0) { 596 pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 597 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 598 "pmcs-fw-disable-update", 0); 599 } 600 pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 601 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries", 602 PMCS_NQENTRY); 603 604 /* 605 * Initialize FMA 606 */ 607 pwp->dev_acc_attr = pwp->reg_acc_attr = rattr; 608 pwp->iqp_dma_attr = pwp->oqp_dma_attr = 609 pwp->regdump_dma_attr = pwp->cip_dma_attr = 610 pwp->fwlog_dma_attr = pmcs_dattr; 611 pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip, 612 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable", 613 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 614 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 615 pmcs_fm_init(pwp); 616 617 /* 618 * Map registers 619 */ 620 if (pci_config_setup(dip, &pwp->pci_acc_handle)) { 621 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 622 "pci config setup failed"); 623 ddi_soft_state_free(pmcs_softc_state, inst); 624 return (DDI_FAILURE); 625 } 626 627 /* 628 * Get the size of register set 3. 
629 */ 630 if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) { 631 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 632 "unable to get size of register set %d", PMCS_REGSET_3); 633 pci_config_teardown(&pwp->pci_acc_handle); 634 ddi_soft_state_free(pmcs_softc_state, inst); 635 return (DDI_FAILURE); 636 } 637 638 /* 639 * Map registers 640 */ 641 pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 642 643 if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs, 644 0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) { 645 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 646 "failed to map Message Unit registers"); 647 pci_config_teardown(&pwp->pci_acc_handle); 648 ddi_soft_state_free(pmcs_softc_state, inst); 649 return (DDI_FAILURE); 650 } 651 652 if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs, 653 0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) { 654 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 655 "failed to map TOP registers"); 656 ddi_regs_map_free(&pwp->msg_acc_handle); 657 pci_config_teardown(&pwp->pci_acc_handle); 658 ddi_soft_state_free(pmcs_softc_state, inst); 659 return (DDI_FAILURE); 660 } 661 662 if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs, 663 0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) { 664 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 665 "failed to map GSM registers"); 666 ddi_regs_map_free(&pwp->top_acc_handle); 667 ddi_regs_map_free(&pwp->msg_acc_handle); 668 pci_config_teardown(&pwp->pci_acc_handle); 669 ddi_soft_state_free(pmcs_softc_state, inst); 670 return (DDI_FAILURE); 671 } 672 673 if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs, 674 0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) { 675 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 676 "failed to map MPI registers"); 677 ddi_regs_map_free(&pwp->top_acc_handle); 678 ddi_regs_map_free(&pwp->gsm_acc_handle); 679 ddi_regs_map_free(&pwp->msg_acc_handle); 680 pci_config_teardown(&pwp->pci_acc_handle); 681 
ddi_soft_state_free(pmcs_softc_state, inst); 682 return (DDI_FAILURE); 683 } 684 pwp->mpibar = 685 (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size; 686 687 /* 688 * Make sure we can support this card. 689 */ 690 pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION); 691 692 switch (pwp->chiprev) { 693 case PMCS_PM8001_REV_A: 694 case PMCS_PM8001_REV_B: 695 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 696 "Rev A/B Card no longer supported"); 697 goto failure; 698 case PMCS_PM8001_REV_C: 699 break; 700 default: 701 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 702 "Unknown chip revision (%d)", pwp->chiprev); 703 goto failure; 704 } 705 706 /* 707 * Allocate DMA addressable area for Inbound and Outbound Queue indices 708 * that the chip needs to access plus a space for scratch usage 709 */ 710 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 711 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls, 712 &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip, 713 &pwp->ciaddr) == B_FALSE) { 714 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 715 "Failed to setup DMA for index/scratch"); 716 goto failure; 717 } 718 719 bzero(pwp->cip, ptob(1)); 720 pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE]; 721 pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE; 722 723 /* 724 * Allocate DMA S/G list chunks 725 */ 726 (void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES); 727 728 /* 729 * Allocate a DMA addressable area for the firmware log (if needed) 730 */ 731 if (pwp->fwlog) { 732 /* 733 * Align to event log header and entry size 734 */ 735 pwp->fwlog_dma_attr.dma_attr_align = 32; 736 if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr, 737 &pwp->fwlog_acchdl, 738 &pwp->fwlog_hndl, PMCS_FWLOG_SIZE, 739 (caddr_t *)&pwp->fwlogp, 740 &pwp->fwaddr) == B_FALSE) { 741 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 742 "Failed to setup DMA for fwlog area"); 743 pwp->fwlog = 0; 744 } else { 745 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 746 pwp->fwlogp_aap1 = (pmcs_fw_event_hdr_t 
*)pwp->fwlogp; 747 pwp->fwlogp_iop = (pmcs_fw_event_hdr_t *)((void *) 748 ((caddr_t)pwp->fwlogp + (PMCS_FWLOG_SIZE / 2))); 749 } 750 } 751 752 if (pwp->flash_chunk_addr == NULL) { 753 pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE; 754 if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr, 755 &pwp->regdump_acchdl, 756 &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE, 757 (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) == 758 B_FALSE) { 759 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 760 "Failed to setup DMA for register dump area"); 761 goto failure; 762 } 763 bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE); 764 } 765 766 /* 767 * More bits of local initialization... 768 */ 769 pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0); 770 if (pwp->tq == NULL) { 771 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 772 "unable to create worker taskq"); 773 goto failure; 774 } 775 776 /* 777 * Cache of structures for dealing with I/O completion callbacks. 778 */ 779 (void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst); 780 pwp->iocomp_cb_cache = kmem_cache_create(buf, 781 sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0); 782 783 /* 784 * Cache of PHY structures 785 */ 786 (void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst); 787 pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8, 788 pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp, 789 NULL, 0); 790 791 /* 792 * Allocate space for the I/O completion threads 793 */ 794 num_threads = ncpus_online; 795 if (num_threads > PMCS_MAX_CQ_THREADS) { 796 num_threads = PMCS_MAX_CQ_THREADS; 797 } 798 799 pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) * 800 num_threads, KM_SLEEP); 801 pwp->cq_info.cq_threads = num_threads; 802 pwp->cq_info.cq_next_disp_thr = 0; 803 pwp->cq_info.cq_stop = B_FALSE; 804 805 /* 806 * Set the quantum value in clock ticks for the I/O interrupt 807 * coalescing timer. 
808 */ 809 pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS); 810 811 /* 812 * We have a delicate dance here. We need to set up 813 * interrupts so we know how to set up some OQC 814 * tables. However, while we're setting up table 815 * access, we may need to flash new firmware and 816 * reset the card, which will take some finessing. 817 */ 818 819 /* 820 * Set up interrupts here. 821 */ 822 switch (pmcs_setup_intr(pwp)) { 823 case 0: 824 break; 825 case EIO: 826 pwp->stuck = 1; 827 /* FALLTHROUGH */ 828 default: 829 goto failure; 830 } 831 832 /* 833 * Set these up now becuase they are used to initialize the OQC tables. 834 * 835 * If we have MSI or MSI-X interrupts set up and we have enough 836 * vectors for each OQ, the Outbound Queue vectors can all be the 837 * same as the appropriate interrupt routine will have been called 838 * and the doorbell register automatically cleared. 839 * This keeps us from having to check the Outbound Doorbell register 840 * when the routines for these interrupts are called. 841 * 842 * If we have Legacy INT-X interrupts set up or we didn't have enough 843 * MSI/MSI-X vectors to uniquely identify each OQ, we point these 844 * vectors to the bits we would like to have set in the Outbound 845 * Doorbell register because pmcs_all_intr will read the doorbell 846 * register to find out why we have an interrupt and write the 847 * corresponding 'clear' bit for that interrupt. 848 */ 849 850 switch (pwp->intr_cnt) { 851 case 1: 852 /* 853 * Only one vector, so we must check all OQs for MSI. For 854 * INT-X, there's only one vector anyway, so we can just 855 * use the outbound queue bits to keep from having to 856 * check each queue for each interrupt. 
857 */ 858 if (pwp->int_type == PMCS_INT_FIXED) { 859 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 860 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 861 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 862 } else { 863 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 864 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE; 865 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE; 866 } 867 break; 868 case 2: 869 /* With 2, we can at least isolate IODONE */ 870 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 871 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 872 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL; 873 break; 874 case 4: 875 /* With 4 vectors, everybody gets one */ 876 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 877 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 878 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 879 break; 880 } 881 882 /* 883 * Do the first part of setup 884 */ 885 if (pmcs_setup(pwp)) { 886 goto failure; 887 } 888 pmcs_report_fwversion(pwp); 889 890 /* 891 * Now do some additonal allocations based upon information 892 * gathered during MPI setup. 
893 */ 894 pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP); 895 ASSERT(pwp->nphy < SAS2_PHYNUM_MAX); 896 phyp = pwp->root_phys; 897 for (i = 0; i < pwp->nphy; i++) { 898 if (i < pwp->nphy-1) { 899 phyp->sibling = (phyp + 1); 900 } 901 mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER, 902 DDI_INTR_PRI(pwp->intr_pri)); 903 phyp->phynum = i & SAS2_PHYNUM_MASK; 904 pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path)); 905 phyp->pwp = pwp; 906 phyp->device_id = PMCS_INVALID_DEVICE_ID; 907 phyp->portid = PMCS_PHY_INVALID_PORT_ID; 908 phyp++; 909 } 910 911 pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP); 912 for (i = 0; i < pwp->max_cmd - 1; i++) { 913 pmcwork_t *pwrk = &pwp->work[i]; 914 mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER, 915 DDI_INTR_PRI(pwp->intr_pri)); 916 cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL); 917 STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next); 918 919 } 920 pwp->targets = (pmcs_xscsi_t **) 921 kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP); 922 923 pwp->iqpt = (pmcs_iqp_trace_t *) 924 kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP); 925 pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP); 926 pwp->iqpt->curpos = pwp->iqpt->head; 927 pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE; 928 929 /* 930 * Start MPI communication. 931 */ 932 if (pmcs_start_mpi(pwp)) { 933 if (pmcs_soft_reset(pwp, B_FALSE)) { 934 goto failure; 935 } 936 pwp->last_reset_reason = PMCS_LAST_RST_ATTACH; 937 } 938 939 /* 940 * Do some initial acceptance tests. 941 * This tests interrupts and queues. 942 */ 943 if (pmcs_echo_test(pwp)) { 944 goto failure; 945 } 946 947 /* Read VPD - if it exists */ 948 if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) { 949 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 950 "%s: Unable to read VPD: " 951 "attempting to fabricate", __func__); 952 /* 953 * When we release, this must goto failure and the call 954 * to pmcs_fabricate_wwid is removed. 
955 */ 956 /* goto failure; */ 957 if (!pmcs_fabricate_wwid(pwp)) { 958 goto failure; 959 } 960 } 961 962 /* 963 * We're now officially running 964 */ 965 pwp->state = STATE_RUNNING; 966 967 /* 968 * Check firmware versions and load new firmware 969 * if needed and reset. 970 */ 971 if (pmcs_firmware_update(pwp)) { 972 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 973 "%s: Firmware update failed", __func__); 974 goto failure; 975 } 976 977 /* 978 * Create completion threads. 979 */ 980 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 981 pwp->cq_info.cq_thr_info[i].cq_pwp = pwp; 982 pwp->cq_info.cq_thr_info[i].cq_thread = 983 thread_create(NULL, 0, pmcs_scsa_cq_run, 984 &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri); 985 } 986 987 /* 988 * Create one thread to deal with the updating of the interrupt 989 * coalescing timer. 990 */ 991 pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal, 992 pwp, 0, &p0, TS_RUN, minclsyspri); 993 994 /* 995 * Kick off the watchdog 996 */ 997 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 998 drv_usectohz(PMCS_WATCH_INTERVAL)); 999 /* 1000 * Do the SCSI attachment code (before starting phys) 1001 */ 1002 if (pmcs_scsa_init(pwp, &pmcs_dattr)) { 1003 goto failure; 1004 } 1005 pwp->hba_attached = 1; 1006 1007 /* 1008 * Initialize the rwlock for the iport elements. 
1009 */ 1010 rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL); 1011 1012 /* Check all acc & dma handles allocated in attach */ 1013 if (pmcs_check_acc_dma_handle(pwp)) { 1014 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 1015 goto failure; 1016 } 1017 1018 /* 1019 * Create the phymap for this HBA instance 1020 */ 1021 if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL, 1022 pwp, pmcs_phymap_activate, pmcs_phymap_deactivate, 1023 &pwp->hss_phymap) != DDI_SUCCESS) { 1024 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1025 "%s: pmcs%d phymap_create failed", __func__, inst); 1026 goto failure; 1027 } 1028 ASSERT(pwp->hss_phymap); 1029 1030 /* 1031 * Create the iportmap for this HBA instance 1032 */ 1033 if (scsi_hba_iportmap_create(dip, iportmap_usec, 1034 &pwp->hss_iportmap) != DDI_SUCCESS) { 1035 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1036 "%s: pmcs%d iportmap_create failed", __func__, inst); 1037 goto failure; 1038 } 1039 ASSERT(pwp->hss_iportmap); 1040 1041 /* 1042 * Start the PHYs. 1043 */ 1044 if (pmcs_start_phys(pwp)) { 1045 goto failure; 1046 } 1047 1048 /* 1049 * From this point on, we can't fail. 
	 */
	ddi_report_dev(dip);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
	    &sm_hba);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
	    pmcs_driver_rev);

	/* SM-HBA */
	chiprev = 'A' + pwp->chiprev;
	/*
	 * NOTE(review): "%s" with &chiprev reads from a single char with no
	 * guaranteed NUL terminator; snprintf's size bound of 2 limits the
	 * write but not the read.  "%c" with chiprev looks safer -- confirm.
	 */
	(void) snprintf(hw_rev, 2, "%s", &chiprev);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
	    hw_rev);

	/* SM-HBA: map firmware type to a human-readable support level */
	switch (PMCS_FW_TYPE(pwp)) {
	case PMCS_FW_TYPE_RELEASED:
		fwsupport = "Released";
		break;
	case PMCS_FW_TYPE_DEVELOPMENT:
		fwsupport = "Development";
		break;
	case PMCS_FW_TYPE_ALPHA:
		fwsupport = "Alpha";
		break;
	case PMCS_FW_TYPE_BETA:
		fwsupport = "Beta";
		break;
	default:
		fwsupport = "Special";
		break;
	}
	(void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
	    PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
	    fwsupport);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
	    fw_rev);

	/* SM-HBA */
	num_phys = pwp->nphy;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
	    &num_phys);

	/* SM-HBA */
	protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
	    &protocol);

	return (DDI_SUCCESS);

failure:
	/* Tear down whatever attach managed to set up before the failure */
	if (pmcs_unattach(pwp)) {
		pwp->stuck = 1;
	}
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point, called for both the HBA node and its iport
 * children.  DDI_DETACH tears the node down; DDI_SUSPEND/DDI_PM_SUSPEND
 * (HBA node only) suspend the worker taskq and mark the softstate
 * suspended.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst = ddi_get_instance(dip);
	pmcs_iport_t *iport = NULL;
	pmcs_hw_t *pwp = NULL;
	scsi_hba_tran_t *tran;

	/* A unit address distinguishes iport children from the HBA node */
	if (scsi_hba_iport_unit_address(dip) != NULL) {
		/* iport node */
		iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
		ASSERT(iport);
		if (iport == NULL) {
			return (DDI_FAILURE);
		}
		pwp = iport->pwp;
	} else {
		/* hba node */
		pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst);
		ASSERT(pwp);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}
	}

	switch (cmd) {
	case DDI_DETACH:
		if (iport) {
			/* iport detach */
			if (pmcs_iport_unattach(iport)) {
				return (DDI_FAILURE);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "iport%d detached", inst);
			return (DDI_SUCCESS);
		} else {
			/* HBA detach */
			if (pmcs_unattach(pwp)) {
				return (DDI_FAILURE);
			}
			return (DDI_SUCCESS);
		}

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		/* No DDI_SUSPEND on iport nodes */
		if (iport) {
			return (DDI_SUCCESS);
		}

		/* A partially-torn-down instance cannot safely suspend */
		if (pwp->stuck) {
			return (DDI_FAILURE);
		}
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}

		pwp = TRAN2PMC(tran);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}
		mutex_enter(&pwp->lock);
		if (pwp->tq) {
			/* Quiesce the worker taskq before marking suspended */
			ddi_taskq_suspend(pwp->tq);
		}
		pwp->suspended = 1;
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending");
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Detach an iport node.  Fails if the iport still has configured targets
 * or its unit address is still active in the phymap; otherwise removes
 * the iport from the HBA's list, drains references, and tears down the
 * PHY list, kstats, target map and softstate.
 */
static int
pmcs_iport_unattach(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp = iport->pwp;

	/*
	 * First, check if there are still any configured targets on this
	 * iport. If so, we fail detach.
	 */
	if (pmcs_iport_has_targets(pwp, iport)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: iport has targets (luns)",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/*
	 * Remove this iport from our list if it is inactive in the phymap.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	mutex_enter(&iport->lock);

	if (iport->ua_state == UA_ACTIVE) {
		mutex_exit(&iport->lock);
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: "
		    "iport unit address active in phymap",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/* If it's our only iport, clear iports_attached */
	ASSERT(pwp->num_iports >= 1);
	if (--pwp->num_iports == 0) {
		pwp->iports_attached = 0;
	}

	ASSERT(list_link_active(&iport->list_node));
	list_remove(&pwp->iports, iport);
	rw_exit(&pwp->iports_lock);

	/*
	 * We have removed the iport handle from the HBA's iports list,
	 * there will be no new references to it. Two things must be
	 * guarded against here. First, we could have PHY up events,
	 * adding themselves to the iport->phys list and grabbing ref's
	 * on our iport handle. Second, we could have existing references
	 * to this iport handle from a point in time prior to the list
	 * removal above.
	 *
	 * So first, destroy the phys list. Remove any phys that have snuck
	 * in after the phymap deactivate, dropping the refcnt accordingly.
	 * If these PHYs are still up if and when the phymap reactivates
	 * (i.e. when this iport reattaches), we'll populate the list with
	 * them and bump the refcnt back up.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	ASSERT(list_is_empty(&iport->phys));
	list_destroy(&iport->phys);
	mutex_exit(&iport->lock);

	/*
	 * Second, wait for any other references to this iport to be
	 * dropped, then continue teardown.
	 */
	mutex_enter(&iport->refcnt_lock);
	while (iport->refcnt != 0) {
		cv_wait(&iport->refcnt_cv, &iport->refcnt_lock);
	}
	mutex_exit(&iport->refcnt_lock);

	/* Delete kstats */
	pmcs_destroy_phy_stats(iport);

	/* Destroy the iport target map */
	if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) {
		/*
		 * NOTE(review): failing here leaves the iport already
		 * removed from pwp->iports with its phys list destroyed;
		 * confirm a subsequent detach retry handles that state.
		 */
		return (DDI_FAILURE);
	}

	/* Free the tgt soft state */
	if (iport->tgt_sstate != NULL) {
		ddi_soft_state_bystr_fini(&iport->tgt_sstate);
	}

	/* Free our unit address string */
	strfree(iport->ua);

	/* Finish teardown and free the softstate */
	mutex_destroy(&iport->refcnt_lock);
	mutex_destroy(&iport->smp_lock);
	ASSERT(iport->refcnt == 0);
	cv_destroy(&iport->refcnt_cv);
	cv_destroy(&iport->smp_cv);
	mutex_destroy(&iport->lock);
	ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip));

	return (DDI_SUCCESS);
}

/*
 * Tear down the HBA instance: interrupts, completion threads, maps,
 * chip shutdown, DMA resources, register mappings and all allocations.
 * Safe to call from attach failure paths (guards on locks_initted and
 * on each resource pointer).  Returns 0 on success, -1 if teardown
 * could not fully complete (instance is "stuck").
 */
static int
pmcs_unattach(pmcs_hw_t *pwp)
{
	int i;
	enum pwpstate curstate;
	pmcs_cq_thr_info_t *cqti;

	/*
	 * Tear down the interrupt infrastructure.
	 */
	if (pmcs_teardown_intr(pwp)) {
		pwp->stuck = 1;
	}
	pwp->intr_cnt = 0;

	/*
	 * Grab a lock, if initted, to set state.
	 */
	if (pwp->locks_initted) {
		mutex_enter(&pwp->lock);
		if (pwp->state != STATE_DEAD) {
			pwp->state = STATE_UNPROBING;
		}
		curstate = pwp->state;
		mutex_exit(&pwp->lock);

		/*
		 * Stop the I/O completion threads.
1314 */ 1315 mutex_enter(&pwp->cq_lock); 1316 pwp->cq_info.cq_stop = B_TRUE; 1317 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1318 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1319 cqti = &pwp->cq_info.cq_thr_info[i]; 1320 mutex_enter(&cqti->cq_thr_lock); 1321 cv_signal(&cqti->cq_cv); 1322 mutex_exit(&cqti->cq_thr_lock); 1323 mutex_exit(&pwp->cq_lock); 1324 thread_join(cqti->cq_thread->t_did); 1325 mutex_enter(&pwp->cq_lock); 1326 } 1327 } 1328 mutex_exit(&pwp->cq_lock); 1329 1330 /* 1331 * Stop the interrupt coalescing timer thread 1332 */ 1333 if (pwp->ict_thread) { 1334 mutex_enter(&pwp->ict_lock); 1335 pwp->io_intr_coal.stop_thread = B_TRUE; 1336 cv_signal(&pwp->ict_cv); 1337 mutex_exit(&pwp->ict_lock); 1338 thread_join(pwp->ict_thread->t_did); 1339 } 1340 } else { 1341 if (pwp->state != STATE_DEAD) { 1342 pwp->state = STATE_UNPROBING; 1343 } 1344 curstate = pwp->state; 1345 } 1346 1347 if (&pwp->iports != NULL) { 1348 /* Destroy the iports lock */ 1349 rw_destroy(&pwp->iports_lock); 1350 /* Destroy the iports list */ 1351 ASSERT(list_is_empty(&pwp->iports)); 1352 list_destroy(&pwp->iports); 1353 } 1354 1355 if (pwp->hss_iportmap != NULL) { 1356 /* Destroy the iportmap */ 1357 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1358 } 1359 1360 if (pwp->hss_phymap != NULL) { 1361 /* Destroy the phymap */ 1362 sas_phymap_destroy(pwp->hss_phymap); 1363 } 1364 1365 /* 1366 * Make sure that any pending watchdog won't 1367 * be called from this point on out. 1368 */ 1369 (void) untimeout(pwp->wdhandle); 1370 /* 1371 * After the above action, the watchdog 1372 * timer that starts up the worker task 1373 * may trigger but will exit immediately 1374 * on triggering. 1375 * 1376 * Now that this is done, we can destroy 1377 * the task queue, which will wait if we're 1378 * running something on it. 
1379 */ 1380 if (pwp->tq) { 1381 ddi_taskq_destroy(pwp->tq); 1382 pwp->tq = NULL; 1383 } 1384 1385 pmcs_fm_fini(pwp); 1386 1387 if (pwp->hba_attached) { 1388 (void) scsi_hba_detach(pwp->dip); 1389 pwp->hba_attached = 0; 1390 } 1391 1392 /* 1393 * If the chip hasn't been marked dead, shut it down now 1394 * to bring it back to a known state without attempting 1395 * a soft reset. 1396 */ 1397 if (curstate != STATE_DEAD && pwp->locks_initted) { 1398 /* 1399 * De-register all registered devices 1400 */ 1401 pmcs_deregister_devices(pwp, pwp->root_phys); 1402 1403 /* 1404 * Stop all the phys. 1405 */ 1406 pmcs_stop_phys(pwp); 1407 1408 /* 1409 * Shut Down Message Passing 1410 */ 1411 (void) pmcs_stop_mpi(pwp); 1412 1413 /* 1414 * Reset chip 1415 */ 1416 (void) pmcs_soft_reset(pwp, B_FALSE); 1417 pwp->last_reset_reason = PMCS_LAST_RST_DETACH; 1418 } 1419 1420 /* 1421 * Turn off interrupts on the chip 1422 */ 1423 if (pwp->mpi_acc_handle) { 1424 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1425 } 1426 1427 /* Destroy pwp's lock */ 1428 if (pwp->locks_initted) { 1429 mutex_destroy(&pwp->lock); 1430 mutex_destroy(&pwp->dma_lock); 1431 mutex_destroy(&pwp->axil_lock); 1432 mutex_destroy(&pwp->cq_lock); 1433 mutex_destroy(&pwp->config_lock); 1434 mutex_destroy(&pwp->ict_lock); 1435 mutex_destroy(&pwp->wfree_lock); 1436 mutex_destroy(&pwp->pfree_lock); 1437 mutex_destroy(&pwp->dead_phylist_lock); 1438 #ifdef DEBUG 1439 mutex_destroy(&pwp->dbglock); 1440 #endif 1441 cv_destroy(&pwp->ict_cv); 1442 cv_destroy(&pwp->drain_cv); 1443 pwp->locks_initted = 0; 1444 } 1445 1446 /* 1447 * Free DMA handles and associated consistent memory 1448 */ 1449 if (pwp->regdump_hndl) { 1450 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1451 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1452 "Condition check failed " 1453 "at %s():%d", __func__, __LINE__); 1454 } 1455 ddi_dma_free_handle(&pwp->regdump_hndl); 1456 ddi_dma_mem_free(&pwp->regdump_acchdl); 1457 pwp->regdump_hndl = 
0; 1458 } 1459 if (pwp->fwlog_hndl) { 1460 if (ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1461 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1462 "Condition check failed " 1463 "at %s():%d", __func__, __LINE__); 1464 } 1465 ddi_dma_free_handle(&pwp->fwlog_hndl); 1466 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1467 pwp->fwlog_hndl = 0; 1468 } 1469 if (pwp->cip_handles) { 1470 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1471 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1472 "Condition check failed " 1473 "at %s():%d", __func__, __LINE__); 1474 } 1475 ddi_dma_free_handle(&pwp->cip_handles); 1476 ddi_dma_mem_free(&pwp->cip_acchdls); 1477 pwp->cip_handles = 0; 1478 } 1479 for (i = 0; i < PMCS_NOQ; i++) { 1480 if (pwp->oqp_handles[i]) { 1481 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1482 DDI_SUCCESS) { 1483 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1484 "Condition check failed at %s():%d", 1485 __func__, __LINE__); 1486 } 1487 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1488 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1489 pwp->oqp_handles[i] = 0; 1490 } 1491 } 1492 for (i = 0; i < PMCS_NIQ; i++) { 1493 if (pwp->iqp_handles[i]) { 1494 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1495 DDI_SUCCESS) { 1496 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1497 "Condition check failed at %s():%d", 1498 __func__, __LINE__); 1499 } 1500 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1501 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1502 pwp->iqp_handles[i] = 0; 1503 } 1504 } 1505 1506 pmcs_free_dma_chunklist(pwp); 1507 1508 /* 1509 * Unmap registers and destroy access handles 1510 */ 1511 if (pwp->mpi_acc_handle) { 1512 ddi_regs_map_free(&pwp->mpi_acc_handle); 1513 pwp->mpi_acc_handle = 0; 1514 } 1515 if (pwp->top_acc_handle) { 1516 ddi_regs_map_free(&pwp->top_acc_handle); 1517 pwp->top_acc_handle = 0; 1518 } 1519 if (pwp->gsm_acc_handle) { 1520 ddi_regs_map_free(&pwp->gsm_acc_handle); 1521 pwp->gsm_acc_handle = 0; 1522 } 1523 if (pwp->msg_acc_handle) { 1524 
ddi_regs_map_free(&pwp->msg_acc_handle); 1525 pwp->msg_acc_handle = 0; 1526 } 1527 if (pwp->pci_acc_handle) { 1528 pci_config_teardown(&pwp->pci_acc_handle); 1529 pwp->pci_acc_handle = 0; 1530 } 1531 1532 /* 1533 * Do memory allocation cleanup. 1534 */ 1535 while (pwp->dma_freelist) { 1536 pmcs_dmachunk_t *this = pwp->dma_freelist; 1537 pwp->dma_freelist = this->nxt; 1538 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1539 } 1540 1541 /* 1542 * Free pools 1543 */ 1544 if (pwp->iocomp_cb_cache) { 1545 kmem_cache_destroy(pwp->iocomp_cb_cache); 1546 } 1547 1548 /* 1549 * Free all PHYs (at level > 0), then free the cache 1550 */ 1551 pmcs_free_all_phys(pwp, pwp->root_phys); 1552 if (pwp->phy_cache) { 1553 kmem_cache_destroy(pwp->phy_cache); 1554 } 1555 1556 /* 1557 * Free root PHYs 1558 */ 1559 if (pwp->root_phys) { 1560 pmcs_phy_t *phyp = pwp->root_phys; 1561 for (i = 0; i < pwp->nphy; i++) { 1562 mutex_destroy(&phyp->phy_lock); 1563 phyp = phyp->sibling; 1564 } 1565 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1566 pwp->root_phys = NULL; 1567 pwp->nphy = 0; 1568 } 1569 1570 /* Free the targets list */ 1571 if (pwp->targets) { 1572 kmem_free(pwp->targets, 1573 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1574 } 1575 1576 /* 1577 * Free work structures 1578 */ 1579 1580 if (pwp->work && pwp->max_cmd) { 1581 for (i = 0; i < pwp->max_cmd - 1; i++) { 1582 pmcwork_t *pwrk = &pwp->work[i]; 1583 mutex_destroy(&pwrk->lock); 1584 cv_destroy(&pwrk->sleep_cv); 1585 } 1586 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1587 pwp->work = NULL; 1588 pwp->max_cmd = 0; 1589 } 1590 1591 /* 1592 * Do last property and SCSA cleanup 1593 */ 1594 if (pwp->tran) { 1595 scsi_hba_tran_free(pwp->tran); 1596 pwp->tran = NULL; 1597 } 1598 if (pwp->reset_notify_listf) { 1599 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1600 pwp->reset_notify_listf = NULL; 1601 } 1602 ddi_prop_remove_all(pwp->dip); 1603 if (pwp->stuck) { 1604 return (-1); 1605 } 1606 1607 /* Free 
register dump area if allocated */ 1608 if (pwp->regdumpp) { 1609 kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE); 1610 pwp->regdumpp = NULL; 1611 } 1612 if (pwp->iqpt && pwp->iqpt->head) { 1613 kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE); 1614 pwp->iqpt->head = pwp->iqpt->curpos = NULL; 1615 } 1616 if (pwp->iqpt) { 1617 kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t)); 1618 pwp->iqpt = NULL; 1619 } 1620 1621 ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip)); 1622 return (0); 1623 } 1624 1625 /* 1626 * quiesce (9E) entry point 1627 * 1628 * This function is called when the system is single-threaded at high PIL 1629 * with preemption disabled. Therefore, the function must not block/wait/sleep. 1630 * 1631 * Returns DDI_SUCCESS or DDI_FAILURE. 1632 * 1633 */ 1634 static int 1635 pmcs_quiesce(dev_info_t *dip) 1636 { 1637 pmcs_hw_t *pwp; 1638 scsi_hba_tran_t *tran; 1639 1640 if ((tran = ddi_get_driver_private(dip)) == NULL) 1641 return (DDI_SUCCESS); 1642 1643 /* No quiesce necessary on a per-iport basis */ 1644 if (scsi_hba_iport_unit_address(dip) != NULL) { 1645 return (DDI_SUCCESS); 1646 } 1647 1648 if ((pwp = TRAN2PMC(tran)) == NULL) 1649 return (DDI_SUCCESS); 1650 1651 /* Stop MPI & Reset chip (no need to re-initialize) */ 1652 (void) pmcs_stop_mpi(pwp); 1653 (void) pmcs_soft_reset(pwp, B_TRUE); 1654 pwp->last_reset_reason = PMCS_LAST_RST_QUIESCE; 1655 1656 return (DDI_SUCCESS); 1657 } 1658 1659 /* 1660 * Called with xp->statlock and PHY lock and scratch acquired. 1661 */ 1662 static int 1663 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp) 1664 { 1665 ata_identify_t *ati; 1666 int result, i; 1667 pmcs_phy_t *pptr; 1668 uint16_t *a; 1669 union { 1670 uint8_t nsa[8]; 1671 uint16_t nsb[4]; 1672 } u; 1673 1674 /* 1675 * Safe defaults - use only if this target is brand new (i.e. 
	 * doesn't already have these settings configured)
	 */
	if (xp->capacity == 0) {
		xp->capacity = (uint64_t)-1;
		xp->ca = 1;
		xp->qdepth = 1;
		xp->pio = 1;
	}

	pptr = xp->phy;

	/*
	 * We only try and issue an IDENTIFY for first level
	 * (direct attached) devices. We don't try and
	 * set other quirks here (this will happen later,
	 * if the device is fully configured)
	 */
	if (pptr->level) {
		return (0);
	}

	/* Drop statlock across the IDENTIFY, then reacquire */
	mutex_exit(&xp->statlock);
	result = pmcs_sata_identify(pwp, pptr);
	mutex_enter(&xp->statlock);

	if (result) {
		return (result);
	}
	ati = pwp->scratch;
	/* Byte-swap IDENTIFY words 108-111 into the nsa/nsb union */
	a = &ati->word108;
	for (i = 0; i < 4; i++) {
		u.nsb[i] = ddi_swap16(*a++);
	}

	/*
	 * Check the returned data for being a valid (NAA=5) WWN.
	 * If so, use that and override the SAS address we were
	 * given at Link Up time.
	 */
	if ((u.nsa[0] >> 4) == 5) {
		(void) memcpy(pptr->sas_address, u.nsa, 8);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
	    "%s: %s has SAS ADDRESS " SAS_ADDR_FMT,
	    __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address));
	return (0);
}

/*
 * Called with PHY lock and target statlock held and scratch acquired
 */
static boolean_t
pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target)
{
	ASSERT(target != NULL);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p",
	    __func__, (void *) target);

	/*
	 * NOTE(review): no default case -- dtype values other than
	 * SATA/SAS/EXPANDER fall through with qdepth untouched; confirm
	 * that is intended.
	 */
	switch (target->phy->dtype) {
	case SATA:
		if (pmcs_add_sata_device(pwp, target) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy,
			    target, "%s: add_sata_device failed for tgt 0x%p",
			    __func__, (void *) target);
			return (B_FALSE);
		}
		break;
	case SAS:
		target->qdepth = maxqdepth;
		break;
	case EXPANDER:
		target->qdepth = 1;
		break;
	}

	target->new = 0;
	target->assigned = 1;
	target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
	target->dtype = target->phy->dtype;

	/*
	 * Set the PHY's config stop time to 0. This is one of the final
	 * stops along the config path, so we're indicating that we
	 * successfully configured the PHY.
	 */
	target->phy->config_stop = 0;

	return (B_TRUE);
}

/*
 * Taskq worker: drains the pending work-flag bits set via SCHEDULE_WORK
 * and dispatches each corresponding service routine.
 */
void
pmcs_worker(void *arg)
{
	pmcs_hw_t *pwp = arg;
	ulong_t work_flags;

	DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	if (pwp->state != STATE_RUNNING) {
		return;
	}

	/* Atomically claim all currently-pending work bits */
	work_flags = atomic_swap_ulong(&pwp->work_flags, 0);

	if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) {
		pmcs_ack_events(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) {
		mutex_enter(&pwp->lock);
		pmcs_spinup_release(pwp, NULL);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) {
		pmcs_ssp_event_recovery(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) {
		pmcs_dev_state_recovery(pwp, NULL);
	}

	if (work_flags & PMCS_WORK_FLAG_DEREGISTER_DEV) {
		pmcs_deregister_device_work(pwp, NULL);
	}

	if (work_flags & PMCS_WORK_FLAG_DISCOVER) {
		pmcs_discover(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) {
		/* Reschedule if the abort handler has more to do */
		if (pmcs_abort_handler(pwp)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
	}

	if (work_flags & PMCS_WORK_FLAG_SATA_RUN) {
		pmcs_sata_work(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) {
		pmcs_scsa_wq_run(pwp);
		mutex_enter(&pwp->lock);
		PMCS_CQ_RUN(pwp);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) {
		/* On failure, retry; on success, kick the wait queues */
		if (pmcs_add_more_chunks(pwp,
		    ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
		} else {
1829 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1830 } 1831 } 1832 } 1833 1834 static int 1835 pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize) 1836 { 1837 pmcs_dmachunk_t *dc; 1838 unsigned long dl; 1839 pmcs_chunk_t *pchunk = NULL; 1840 1841 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 1842 1843 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP); 1844 if (pchunk == NULL) { 1845 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1846 "Not enough memory for DMA chunks"); 1847 return (-1); 1848 } 1849 1850 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle, 1851 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp, 1852 &pchunk->dma_addr) == B_FALSE) { 1853 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1854 "Failed to setup DMA for chunks"); 1855 kmem_free(pchunk, sizeof (pmcs_chunk_t)); 1856 return (-1); 1857 } 1858 1859 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) || 1860 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) { 1861 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED); 1862 return (-1); 1863 } 1864 1865 bzero(pchunk->addrp, nsize); 1866 dc = NULL; 1867 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) { 1868 pmcs_dmachunk_t *tmp; 1869 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP); 1870 tmp->nxt = dc; 1871 dc = tmp; 1872 } 1873 mutex_enter(&pwp->dma_lock); 1874 pmcs_idma_chunks(pwp, dc, pchunk, nsize); 1875 pwp->nchunks++; 1876 mutex_exit(&pwp->dma_lock); 1877 return (0); 1878 } 1879 1880 static void 1881 pmcs_check_forward_progress(pmcs_hw_t *pwp) 1882 { 1883 uint32_t cur_iqci; 1884 uint32_t cur_msgu_tick; 1885 uint32_t cur_iop_tick; 1886 int i; 1887 1888 mutex_enter(&pwp->lock); 1889 1890 if (pwp->state == STATE_IN_RESET) { 1891 mutex_exit(&pwp->lock); 1892 return; 1893 } 1894 1895 /* Ensure that inbound work is getting picked up */ 1896 for (i = 0; i < PMCS_NIQ; i++) { 1897 cur_iqci = pmcs_rd_iqci(pwp, i); 1898 if (cur_iqci == pwp->shadow_iqpi[i]) { 1899 pwp->last_iqci[i] = cur_iqci; 1900 
continue; 1901 } 1902 if (cur_iqci == pwp->last_iqci[i]) { 1903 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1904 "Inbound Queue stall detected, issuing reset"); 1905 goto hot_reset; 1906 } 1907 pwp->last_iqci[i] = cur_iqci; 1908 } 1909 1910 /* Check heartbeat on both the MSGU and IOP */ 1911 cur_msgu_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK); 1912 if (cur_msgu_tick == pwp->last_msgu_tick) { 1913 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1914 "Stall detected on MSGU, issuing reset"); 1915 goto hot_reset; 1916 } 1917 pwp->last_msgu_tick = cur_msgu_tick; 1918 1919 cur_iop_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK); 1920 if (cur_iop_tick == pwp->last_iop_tick) { 1921 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1922 "Stall detected on IOP, issuing reset"); 1923 goto hot_reset; 1924 } 1925 pwp->last_iop_tick = cur_iop_tick; 1926 1927 mutex_exit(&pwp->lock); 1928 return; 1929 1930 hot_reset: 1931 pwp->state = STATE_DEAD; 1932 /* 1933 * We've detected a stall. Attempt to recover service via hot 1934 * reset. In case of failure, pmcs_hot_reset() will handle the 1935 * failure and issue any required FM notifications. 1936 * See pmcs_subr.c for more details. 1937 */ 1938 if (pmcs_hot_reset(pwp)) { 1939 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 1940 "%s: hot reset failure", __func__); 1941 } else { 1942 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 1943 "%s: hot reset complete", __func__); 1944 pwp->last_reset_reason = PMCS_LAST_RST_STALL; 1945 } 1946 mutex_exit(&pwp->lock); 1947 } 1948 1949 static void 1950 pmcs_check_commands(pmcs_hw_t *pwp) 1951 { 1952 pmcs_cmd_t *sp; 1953 size_t amt; 1954 char path[32]; 1955 pmcwork_t *pwrk; 1956 pmcs_xscsi_t *target; 1957 pmcs_phy_t *phyp; 1958 int rval; 1959 1960 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) { 1961 mutex_enter(&pwrk->lock); 1962 1963 /* 1964 * If the command isn't active, we can't be timing it still. 1965 * Active means the tag is not free and the state is "on chip". 
		 */
		if (!PMCS_COMMAND_ACTIVE(pwrk)) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * No timer active for this command.
		 */
		if (pwrk->timer == 0) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * Knock off bits for the time interval.
		 */
		if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) {
			pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL);
		} else {
			pwrk->timer = 0;
		}
		if (pwrk->timer > 0) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * The command has now officially timed out.
		 * Get the path for it. If it doesn't have
		 * a phy pointer any more, it's really dead
		 * and can just be put back on the free list.
		 * There should *not* be any commands associated
		 * with it any more.
		 */
		if (pwrk->phy == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "dead command with gone phy being recycled");
			ASSERT(pwrk->xp == NULL);
			pmcs_pwork(pwp, pwrk);
			continue;
		}
		amt = sizeof (path);
		amt = min(sizeof (pwrk->phy->path), amt);
		(void) memcpy(path, pwrk->phy->path, amt);

		/*
		 * If this is a non-SCSA command, stop here. Eventually
		 * we might do something with non-SCSA commands here-
		 * but so far their timeout mechanisms are handled in
		 * the WAIT_FOR macro.
		 */
		if (pwrk->xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: non-SCSA cmd tag 0x%x timed out",
			    path, pwrk->htag);
			mutex_exit(&pwrk->lock);
			continue;
		}

		sp = pwrk->arg;
		ASSERT(sp != NULL);

		/*
		 * Mark it as timed out.
		 */
		CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
#ifdef	DEBUG
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp,
		    "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d",
		    path, pwrk->htag, pwrk->state, pwrk->onwire);
#else
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp,
		    "%s: SCSA cmd tag 0x%x timed out (state %x)",
		    path, pwrk->htag, pwrk->state);
#endif
		/*
		 * Mark the work structure as timed out.
		 */
		pwrk->state = PMCS_WORK_STATE_TIMED_OUT;
		phyp = pwrk->phy;
		target = pwrk->xp;
		/*
		 * Drop pwrk->lock before taking the PHY lock and statlock
		 * (lock ordering), then re-acquire it below as needed.
		 */
		mutex_exit(&pwrk->lock);

		pmcs_lock_phy(phyp);
		mutex_enter(&target->statlock);

		/*
		 * No point attempting recovery if the device is gone
		 */
		if (target->dev_gone) {
			mutex_exit(&target->statlock);
			pmcs_unlock_phy(phyp);
			/*
			 * NOTE(review): pwrk->htag is read here without
			 * pwrk->lock held -- confirm the tag cannot be
			 * recycled in this window.
			 */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
			    "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE "
			    "for htag 0x%08x", __func__,
			    (void *)target, pwrk->htag);
			mutex_enter(&pwrk->lock);
			if (!PMCS_COMMAND_DONE(pwrk)) {
				/* Complete this command here */
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
				    "%s: Completing cmd (htag 0x%08x) "
				    "anyway", __func__, pwrk->htag);
				pwrk->dead = 1;
				CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
				CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
				pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			} else {
				mutex_exit(&pwrk->lock);
			}
			continue;
		}

		mutex_exit(&target->statlock);
		rval = pmcs_abort(pwp, phyp, pwrk->htag, 0, 1);
		if (rval) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
			    "%s: Bad status (%d) on abort of HTAG 0x%08x",
			    __func__, rval, pwrk->htag);
			pmcs_unlock_phy(phyp);
			mutex_enter(&pwrk->lock);
			if (!PMCS_COMMAND_DONE(pwrk)) {
				/* Complete this command here */
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
				    "%s: Completing cmd (htag 0x%08x) "
				    "anyway", __func__, pwrk->htag);
				if (target->dev_gone) {
					pwrk->dead = 1;
					CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
					CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
				}
				pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			} else {
				mutex_exit(&pwrk->lock);
			}
			pmcs_lock_phy(phyp);
			/*
			 * No need to reschedule ABORT if we get any other
			 * status
			 */
			if (rval == ENOMEM) {
				phyp->abort_sent = 0;
				phyp->abort_pending = 1;
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
			}
		}
		pmcs_unlock_phy(phyp);
	}
	/*
	 * Run any completions that may have been queued up.
	 */
	PMCS_CQ_RUN(pwp);
}

/*
 * Periodic watchdog: checks forward progress, restarts discovery when
 * the re-enumeration timer fires, dispatches pending work to the worker
 * taskq, re-arms itself, and ages command timers.
 */
static void
pmcs_watchdog(void *arg)
{
	pmcs_hw_t *pwp = arg;

	DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	/*
	 * Check forward progress on the chip
	 */
	if (++pwp->watchdog_count == PMCS_FWD_PROG_TRIGGER) {
		pwp->watchdog_count = 0;
		pmcs_check_forward_progress(pwp);
	}

	/*
	 * Check to see if we need to kick discovery off again
	 */
	mutex_enter(&pwp->config_lock);
	if (pwp->config_restart &&
	    (ddi_get_lbolt() >= pwp->config_restart_time)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Timer expired for re-enumeration: Start discovery",
		    __func__);
		pwp->config_restart = B_FALSE;
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}
	mutex_exit(&pwp->config_lock);

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		/* Not running: do not re-arm the watchdog */
		mutex_exit(&pwp->lock);
		return;
	}

	/* Any pending work flags?  Kick the worker taskq. */
	if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
		if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
		    DDI_NOSLEEP) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Could not dispatch to worker thread");
		}
	}
	/* Re-arm for the next interval */
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));
2166 2167 mutex_exit(&pwp->lock); 2168 2169 pmcs_check_commands(pwp); 2170 pmcs_handle_dead_phys(pwp); 2171 } 2172 2173 static int 2174 pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt) 2175 { 2176 int i, r, rslt = 0; 2177 for (i = 0; i < icnt; i++) { 2178 r = ddi_intr_remove_handler(pwp->ih_table[i]); 2179 if (r == DDI_SUCCESS) { 2180 continue; 2181 } 2182 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2183 "%s: unable to remove interrupt handler %d", __func__, i); 2184 rslt = -1; 2185 break; 2186 } 2187 return (rslt); 2188 } 2189 2190 static int 2191 pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt) 2192 { 2193 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) { 2194 int r = ddi_intr_block_disable(&pwp->ih_table[0], 2195 pwp->intr_cnt); 2196 if (r != DDI_SUCCESS) { 2197 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2198 "unable to disable interrupt block"); 2199 return (-1); 2200 } 2201 } else { 2202 int i; 2203 for (i = 0; i < icnt; i++) { 2204 if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) { 2205 continue; 2206 } 2207 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2208 "unable to disable interrupt %d", i); 2209 return (-1); 2210 } 2211 } 2212 return (0); 2213 } 2214 2215 static int 2216 pmcs_free_intrs(pmcs_hw_t *pwp, int icnt) 2217 { 2218 int i; 2219 for (i = 0; i < icnt; i++) { 2220 if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) { 2221 continue; 2222 } 2223 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2224 "unable to free interrupt %d", i); 2225 return (-1); 2226 } 2227 kmem_free(pwp->ih_table, pwp->ih_table_size); 2228 pwp->ih_table_size = 0; 2229 return (0); 2230 } 2231 2232 /* 2233 * Try to set up interrupts of type "type" with a minimum number of interrupts 2234 * of "min". 
2235 */ 2236 static void 2237 pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min) 2238 { 2239 int rval, avail, count, actual, max; 2240 2241 rval = ddi_intr_get_nintrs(pwp->dip, type, &count); 2242 if ((rval != DDI_SUCCESS) || (count < min)) { 2243 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2244 "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d", 2245 __func__, type, rval, count, min); 2246 return; 2247 } 2248 2249 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2250 "%s: nintrs = %d for type: %d", __func__, count, type); 2251 2252 rval = ddi_intr_get_navail(pwp->dip, type, &avail); 2253 if ((rval != DDI_SUCCESS) || (avail < min)) { 2254 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2255 "%s: get_navail failed; type: %d rc: %d avail: %d min: %d", 2256 __func__, type, rval, avail, min); 2257 return; 2258 } 2259 2260 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2261 "%s: navail = %d for type: %d", __func__, avail, type); 2262 2263 pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t); 2264 pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP); 2265 2266 switch (type) { 2267 case DDI_INTR_TYPE_MSIX: 2268 pwp->int_type = PMCS_INT_MSIX; 2269 max = PMCS_MAX_MSIX; 2270 break; 2271 case DDI_INTR_TYPE_MSI: 2272 pwp->int_type = PMCS_INT_MSI; 2273 max = PMCS_MAX_MSI; 2274 break; 2275 case DDI_INTR_TYPE_FIXED: 2276 default: 2277 pwp->int_type = PMCS_INT_FIXED; 2278 max = PMCS_MAX_FIXED; 2279 break; 2280 } 2281 2282 rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual, 2283 DDI_INTR_ALLOC_NORMAL); 2284 if (rval != DDI_SUCCESS) { 2285 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2286 "%s: ddi_intr_alloc failed; type: %d rc: %d", 2287 __func__, type, rval); 2288 kmem_free(pwp->ih_table, pwp->ih_table_size); 2289 pwp->ih_table = NULL; 2290 pwp->ih_table_size = 0; 2291 pwp->intr_cnt = 0; 2292 pwp->int_type = PMCS_INT_NONE; 2293 return; 2294 } 2295 2296 pwp->intr_cnt = actual; 2297 } 2298 2299 /* 2300 * Set up interrupts. 
 * We return one of three values:
 *
 * 0 - success
 * EAGAIN - failure to set up interrupts
 * EIO - "" + we're now stuck partly enabled
 *
 * If EIO is returned, we can't unload the driver.
 *
 * NOTE(review): the block-enable failure path below actually returns
 * EFAULT (not EAGAIN), so there is a fourth possible value -- confirm
 * whether callers treat EFAULT and EAGAIN differently.
 */
static int
pmcs_setup_intr(pmcs_hw_t *pwp)
{
	int i, r, itypes, oqv_count;
	ddi_intr_handler_t **iv_table;
	size_t iv_table_size;
	uint_t pri;

	if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get interrupt types");
		return (EAGAIN);
	}

	/* Honor the compile-time tunables that veto MSI-X and/or MSI */
	if (disable_msix) {
		itypes &= ~DDI_INTR_TYPE_MSIX;
	}
	if (disable_msi) {
		itypes &= ~DDI_INTR_TYPE_MSI;
	}

	/*
	 * We won't know what firmware we're running until we call pmcs_setup,
	 * and we can't call pmcs_setup until we establish interrupts.
	 */

	pwp->int_type = PMCS_INT_NONE;

	/*
	 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be
	 * uncivilized.
	 */
	if (itypes & DDI_INTR_TYPE_MSIX) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
		if (pwp->int_type == PMCS_INT_MSIX) {
			itypes = 0;
		}
	}

	/* Fall back to MSI, then to a fixed (legacy) interrupt */
	if (itypes & DDI_INTR_TYPE_MSI) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
		if (pwp->int_type == PMCS_INT_MSI) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_FIXED) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
		if (pwp->int_type == PMCS_INT_FIXED) {
			itypes = 0;
		}
	}

	if (pwp->intr_cnt == 0) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "No interrupts available");
		return (EAGAIN);
	}

	/* Temporary table mapping vector index -> handler function */
	iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
	iv_table = kmem_alloc(iv_table_size, KM_SLEEP);

	/*
	 * Get iblock cookie and add handlers.
	 *
	 * Only 1, 2 or 4 vectors are meaningful: one shared handler, an
	 * I/O-done/non-I/O split, or one handler per outbound queue plus
	 * the fatal vector.
	 */
	switch (pwp->intr_cnt) {
	case 1:
		iv_table[0] = pmcs_all_intr;
		break;
	case 2:
		iv_table[0] = pmcs_iodone_ix;
		iv_table[1] = pmcs_nonio_ix;
		break;
	case 4:
		iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
		iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
		iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
		iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
		break;
	default:
		/*
		 * NOTE(review): this path frees iv_table but leaves the
		 * already-allocated vectors (pwp->ih_table/intr_cnt) in
		 * place -- presumably the attach failure path cleans them
		 * up via pmcs_teardown_intr; verify.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
		kmem_free(iv_table, iv_table_size);
		return (EAGAIN);
	}

	for (i = 0; i < pwp->intr_cnt; i++) {
		r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
		    (caddr_t)pwp, NULL);
		if (r != DDI_SUCCESS) {
			/* Unwind only the i handlers added so far */
			kmem_free(iv_table, iv_table_size);
			if (pmcs_remove_ihandlers(pwp, i)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, i)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	kmem_free(iv_table, iv_table_size);

	if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get int capabilities");
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/* Enable the vectors: block-enable when supported, else one by one */
	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "intr blk enable failed");
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EFAULT);
		}
	} else {
		for (i = 0; i < pwp->intr_cnt; i++) {
			r = ddi_intr_enable(pwp->ih_table[i]);
			if (r == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to enable interrupt %d", i);
			/* Disable only the i vectors enabled so far */
			if (pmcs_disable_intrs(pwp, i)) {
				return (EIO);
			}
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	/*
	 * Set up locks.  All driver mutexes are initialized at interrupt
	 * priority since they may be taken from the handlers above.
	 */
	if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get interrupt priority");
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	pwp->locks_initted = 1;
	pwp->intr_pri = pri;
	mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));
#ifdef DEBUG
	mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
#endif
	cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
	for (i = 0; i < PMCS_NIQ; i++) {
		mutex_init(&pwp->iqp_lock[i], NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
	}
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
		    CV_DRIVER, NULL);
	}

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured",
	    pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
	    ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
	    pwp->intr_cnt == 1? "t" : "ts");


	/*
	 * Enable Interrupts
	 *
	 * Build the outbound doorbell mask: clear (unmask) a bit for each
	 * vector/queue we service.  Note "pri" is reused here as a scratch
	 * mask accumulator -- its interrupt-priority value was consumed by
	 * the mutex_init() calls above.
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}

/*
 * pmcs_teardown_intr
 *
 * Disable, remove and free all interrupt vectors set up by
 * pmcs_setup_intr().  No-op if none are configured.
 *
 * Returns 0 on success, EIO if any stage fails (in which case the driver
 * cannot be unloaded safely).
 */
static int
pmcs_teardown_intr(pmcs_hw_t *pwp)
{
	if (pwp->intr_cnt) {
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
	}
	return (0);
}

/*
 * MSI-X handler for the general outbound queue (4-vector configuration).
 */
static uint_t
pmcs_general_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_general_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * MSI-X handler for the events outbound queue (4-vector configuration).
 */
static uint_t
pmcs_event_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_event_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Handler for the I/O completion outbound queue (2- and 4-vector
 * configurations).
 */
static uint_t
pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);

	/*
	 * It's possible that if we just turned interrupt
coalescing off 2580 * (and thus, re-enabled auto clear for interrupts on the I/O outbound 2581 * queue) that there was an interrupt already pending. We use 2582 * io_intr_coal.int_cleared to ensure that we still drop in here and 2583 * clear the appropriate interrupt bit one last time. 2584 */ 2585 mutex_enter(&pwp->ict_lock); 2586 if (pwp->io_intr_coal.timer_on || 2587 (pwp->io_intr_coal.int_cleared == B_FALSE)) { 2588 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2589 (1 << PMCS_OQ_IODONE)); 2590 pwp->io_intr_coal.int_cleared = B_TRUE; 2591 } 2592 mutex_exit(&pwp->ict_lock); 2593 2594 pmcs_iodone_intr(pwp); 2595 2596 return (DDI_INTR_CLAIMED); 2597 } 2598 2599 static uint_t 2600 pmcs_fatal_ix(caddr_t arg1, caddr_t arg2) 2601 { 2602 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1); 2603 _NOTE(ARGUNUSED(arg2)); 2604 pmcs_fatal_handler(pwp); 2605 return (DDI_INTR_CLAIMED); 2606 } 2607 2608 static uint_t 2609 pmcs_nonio_ix(caddr_t arg1, caddr_t arg2) 2610 { 2611 _NOTE(ARGUNUSED(arg2)); 2612 pmcs_hw_t *pwp = (void *)arg1; 2613 uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB); 2614 2615 /* 2616 * Check for Fatal Interrupts 2617 */ 2618 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) { 2619 pmcs_fatal_handler(pwp); 2620 return (DDI_INTR_CLAIMED); 2621 } 2622 2623 if (obdb & (1 << PMCS_OQ_GENERAL)) { 2624 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2625 (1 << PMCS_OQ_GENERAL)); 2626 pmcs_general_intr(pwp); 2627 pmcs_event_intr(pwp); 2628 } 2629 2630 return (DDI_INTR_CLAIMED); 2631 } 2632 2633 static uint_t 2634 pmcs_all_intr(caddr_t arg1, caddr_t arg2) 2635 { 2636 _NOTE(ARGUNUSED(arg2)); 2637 pmcs_hw_t *pwp = (void *) arg1; 2638 uint32_t obdb; 2639 int handled = 0; 2640 2641 obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB); 2642 2643 /* 2644 * Check for Fatal Interrupts 2645 */ 2646 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) { 2647 pmcs_fatal_handler(pwp); 2648 return (DDI_INTR_CLAIMED); 2649 } 2650 2651 /* 2652 * Check for Outbound Queue service needed 2653 */ 2654 if (obdb & (1 << 
PMCS_OQ_IODONE)) { 2655 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2656 (1 << PMCS_OQ_IODONE)); 2657 obdb ^= (1 << PMCS_OQ_IODONE); 2658 handled++; 2659 pmcs_iodone_intr(pwp); 2660 } 2661 if (obdb & (1 << PMCS_OQ_GENERAL)) { 2662 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2663 (1 << PMCS_OQ_GENERAL)); 2664 obdb ^= (1 << PMCS_OQ_GENERAL); 2665 handled++; 2666 pmcs_general_intr(pwp); 2667 } 2668 if (obdb & (1 << PMCS_OQ_EVENTS)) { 2669 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2670 (1 << PMCS_OQ_EVENTS)); 2671 obdb ^= (1 << PMCS_OQ_EVENTS); 2672 handled++; 2673 pmcs_event_intr(pwp); 2674 } 2675 if (obdb) { 2676 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2677 "interrupt bits not handled (0x%x)", obdb); 2678 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb); 2679 handled++; 2680 } 2681 if (pwp->int_type == PMCS_INT_MSI) { 2682 handled++; 2683 } 2684 return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 2685 } 2686 2687 void 2688 pmcs_fatal_handler(pmcs_hw_t *pwp) 2689 { 2690 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught"); 2691 2692 mutex_enter(&pwp->lock); 2693 pwp->state = STATE_DEAD; 2694 2695 /* 2696 * Attempt a hot reset. In case of failure, pmcs_hot_reset() will 2697 * handle the failure and issue any required FM notifications. 2698 * See pmcs_subr.c for more details. 2699 */ 2700 if (pmcs_hot_reset(pwp)) { 2701 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 2702 "%s: hot reset failure", __func__); 2703 } else { 2704 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 2705 "%s: hot reset complete", __func__); 2706 pwp->last_reset_reason = PMCS_LAST_RST_FATAL_ERROR; 2707 } 2708 mutex_exit(&pwp->lock); 2709 } 2710 2711 /* 2712 * Called with PHY lock and target statlock held and scratch acquired. 
2713 */ 2714 boolean_t 2715 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 2716 { 2717 pmcs_phy_t *pptr = tgt->phy; 2718 2719 switch (pptr->dtype) { 2720 case SAS: 2721 case EXPANDER: 2722 break; 2723 case SATA: 2724 tgt->ca = 1; 2725 break; 2726 default: 2727 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2728 "%s: Target %p has PHY %p with invalid dtype", 2729 __func__, (void *)tgt, (void *)pptr); 2730 return (B_FALSE); 2731 } 2732 2733 tgt->new = 1; 2734 tgt->dev_gone = 0; 2735 tgt->recover_wait = 0; 2736 2737 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2738 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__, 2739 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2740 2741 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) { 2742 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2743 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__, 2744 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2745 mutex_destroy(&tgt->statlock); 2746 mutex_destroy(&tgt->wqlock); 2747 mutex_destroy(&tgt->aqlock); 2748 return (B_FALSE); 2749 } 2750 2751 return (B_TRUE); 2752 } 2753 2754 /* 2755 * Called with softstate lock held 2756 */ 2757 void 2758 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2759 { 2760 pmcs_xscsi_t *xp; 2761 unsigned int vtgt; 2762 2763 ASSERT(mutex_owned(&pwp->lock)); 2764 2765 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) { 2766 xp = pwp->targets[vtgt]; 2767 if (xp == NULL) { 2768 continue; 2769 } 2770 2771 mutex_enter(&xp->statlock); 2772 if (xp->phy == pptr) { 2773 if (xp->new) { 2774 xp->new = 0; 2775 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2776 "cancel config of vtgt %u", vtgt); 2777 } else { 2778 pmcs_clear_xp(pwp, xp); 2779 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2780 "Removed tgt 0x%p vtgt %u", 2781 (void *)xp, vtgt); 2782 } 2783 mutex_exit(&xp->statlock); 2784 break; 2785 } 2786 mutex_exit(&xp->statlock); 2787 } 2788 } 2789 2790 void 2791 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, 2792 pmcs_phy_t 
*phyp, pmcs_xscsi_t *target, const char *fmt, ...) 2793 { 2794 va_list ap; 2795 int written = 0; 2796 char *ptr; 2797 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2798 boolean_t system_log; 2799 int system_log_level; 2800 2801 switch (level) { 2802 case PMCS_PRT_DEBUG_DEVEL: 2803 case PMCS_PRT_DEBUG_DEV_STATE: 2804 case PMCS_PRT_DEBUG_PHY_LOCKING: 2805 case PMCS_PRT_DEBUG_SCSI_STATUS: 2806 case PMCS_PRT_DEBUG_UNDERFLOW: 2807 case PMCS_PRT_DEBUG_CONFIG: 2808 case PMCS_PRT_DEBUG_IPORT: 2809 case PMCS_PRT_DEBUG_MAP: 2810 case PMCS_PRT_DEBUG3: 2811 case PMCS_PRT_DEBUG2: 2812 case PMCS_PRT_DEBUG1: 2813 case PMCS_PRT_DEBUG: 2814 system_log = B_FALSE; 2815 break; 2816 case PMCS_PRT_INFO: 2817 system_log = B_TRUE; 2818 system_log_level = CE_CONT; 2819 break; 2820 case PMCS_PRT_WARN: 2821 system_log = B_TRUE; 2822 system_log_level = CE_NOTE; 2823 break; 2824 case PMCS_PRT_ERR: 2825 system_log = B_TRUE; 2826 system_log_level = CE_WARN; 2827 break; 2828 default: 2829 return; 2830 } 2831 2832 mutex_enter(&pmcs_trace_lock); 2833 gethrestime(&pmcs_tbuf_ptr->timestamp); 2834 ptr = pmcs_tbuf_ptr->buf; 2835 2836 /* 2837 * Store the pertinent PHY and target information if there is any 2838 */ 2839 if (target == NULL) { 2840 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM; 2841 pmcs_tbuf_ptr->target_ua[0] = '\0'; 2842 } else { 2843 pmcs_tbuf_ptr->target_num = target->target_num; 2844 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua, 2845 PMCS_TBUF_UA_MAX_SIZE); 2846 } 2847 2848 if (phyp == NULL) { 2849 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8); 2850 pmcs_tbuf_ptr->phy_path[0] = '\0'; 2851 pmcs_tbuf_ptr->phy_dtype = NOTHING; 2852 } else { 2853 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address, 2854 phyp->sas_address, 8); 2855 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32); 2856 pmcs_tbuf_ptr->phy_dtype = phyp->dtype; 2857 } 2858 2859 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2860 ddi_get_instance(pwp->dip), level); 2861 ptr += strlen(ptr); 2862 
va_start(ap, fmt); 2863 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2864 va_end(ap); 2865 if (written > elem_size - 1) { 2866 /* Indicate truncation */ 2867 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2868 } 2869 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2870 pmcs_tbuf_ptr = pmcs_tbuf; 2871 pmcs_tbuf_wrap = B_TRUE; 2872 pmcs_tbuf_idx = 0; 2873 } else { 2874 ++pmcs_tbuf_ptr; 2875 } 2876 mutex_exit(&pmcs_trace_lock); 2877 2878 /* 2879 * When pmcs_force_syslog in non-zero, everything goes also 2880 * to syslog, at CE_CONT level. 2881 */ 2882 if (pmcs_force_syslog) { 2883 system_log = B_TRUE; 2884 system_log_level = CE_CONT; 2885 } 2886 2887 /* 2888 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2889 * goes to syslog. 2890 */ 2891 if (system_log) { 2892 char local[196]; 2893 2894 switch (system_log_level) { 2895 case CE_CONT: 2896 (void) snprintf(local, sizeof (local), "%sINFO: ", 2897 pmcs_console ? "" : "?"); 2898 break; 2899 case CE_NOTE: 2900 case CE_WARN: 2901 local[0] = 0; 2902 break; 2903 default: 2904 return; 2905 } 2906 2907 ptr = local; 2908 ptr += strlen(local); 2909 (void) snprintf(ptr, (sizeof (local)) - 2910 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2911 ddi_get_instance(pwp->dip)); 2912 ptr += strlen(ptr); 2913 va_start(ap, fmt); 2914 (void) vsnprintf(ptr, 2915 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2916 va_end(ap); 2917 if (level == CE_CONT) { 2918 (void) strlcat(local, "\n", sizeof (local)); 2919 } 2920 cmn_err(system_log_level, local); 2921 } 2922 2923 } 2924 2925 /* 2926 * pmcs_acquire_scratch 2927 * 2928 * If "wait" is true, the caller will wait until it can acquire the scratch. 2929 * This implies the caller needs to be in a context where spinning for an 2930 * indeterminate amount of time is acceptable. 
 */
int
pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
{
	int rval;

	/*
	 * atomic_swap_8 returns the previous value: 0 means we just
	 * acquired the scratch, non-zero means someone else holds it.
	 */
	if (!wait) {
		return (atomic_swap_8(&pwp->scratch_locked, 1));
	}

	/*
	 * Caller will wait for scratch.
	 */
	while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
		drv_usecwait(100);
	}

	/* rval is necessarily 0 here: the scratch is now ours */
	return (rval);
}

/*
 * Release the scratch area acquired via pmcs_acquire_scratch().
 *
 * NOTE(review): this is a plain (non-atomic) store; presumably a single
 * byte store is sufficient to publish the release against the
 * atomic_swap_8 in the acquire path -- confirm no memory barrier is
 * required on weakly-ordered platforms.
 */
void
pmcs_release_scratch(pmcs_hw_t *pwp)
{
	pwp->scratch_locked = 0;
}

/*
 * pmcs_create_phy_stats
 *
 * Create and install a SAS PHY kstat for every PHY on this iport that
 * doesn't already have one.  Called with no locks held; takes the iport
 * lock and each PHY's lock in turn.
 */
static void
pmcs_create_phy_stats(pmcs_iport_t *iport)
{
	sas_phy_stats_t *ps;
	pmcs_hw_t *pwp;
	pmcs_phy_t *phyp;
	int ndata;
	char ks_name[KSTAT_STRLEN];

	ASSERT(iport != NULL);
	pwp = iport->pwp;
	ASSERT(pwp != NULL);

	mutex_enter(&iport->lock);

	for (phyp = list_head(&iport->phys);
	    phyp != NULL;
	    phyp = list_next(&iport->phys, phyp)) {

		pmcs_lock_phy(phyp);

		if (phyp->phy_stats != NULL) {
			pmcs_unlock_phy(phyp);
			/* We've already created this kstat instance */
			continue;
		}

		/* Number of named counters in a sas_phy_stats_t */
		ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));

		/* Name: <driver>.<hba wwn>.<iport instance>.<phy number> */
		(void) snprintf(ks_name, sizeof (ks_name),
		    "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
		    (longlong_t)pwp->sas_wwns[0],
		    ddi_get_instance(iport->dip), phyp->phynum);

		phyp->phy_stats = kstat_create("pmcs",
		    ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
		    KSTAT_TYPE_NAMED, ndata, 0);

		if (phyp->phy_stats == NULL) {
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
			    "%s: Failed to create %s kstats", __func__,
			    ks_name);
			/* Non-fatal: keep going on the remaining PHYs */
			continue;
		}

		ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;

		kstat_named_init(&ps->seconds_since_last_reset,
		    "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_frames,
		    "TxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_frames,
		    "RxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_words,
		    "TxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_words,
		    "RxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->invalid_dword_count,
		    "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->running_disparity_error_count,
		    "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->loss_of_dword_sync_count,
		    "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->phy_reset_problem_count,
		    "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);

		/* Stats are refreshed on demand by pmcs_update_phy_stats() */
		phyp->phy_stats->ks_private = phyp;
		phyp->phy_stats->ks_update = pmcs_update_phy_stats;
		kstat_install(phyp->phy_stats);
		pmcs_unlock_phy(phyp);
	}

	mutex_exit(&iport->lock);
}

/*
 * kstat ks_update callback: refresh a PHY's error counters by querying
 * the chip's diagnostic registers.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
pmcs_update_phy_stats(kstat_t *ks, int rw)
{
	int val, ret = DDI_FAILURE;
	pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private;
	pmcs_hw_t *pwp = pptr->pwp;
	sas_phy_stats_t *ps = ks->ks_data;

	_NOTE(ARGUNUSED(rw));
	ASSERT((pptr != NULL) && (pwp != NULL));

	/*
	 * We just want to lock against other invocations of kstat;
	 * we don't need to pmcs_lock_phy() for this.
	 */
	mutex_enter(&pptr->phy_lock);

	/* Get Stats from Chip */
	val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->invalid_dword_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->running_disparity_error_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->phy_reset_problem_count.value.ull = (unsigned long long)val;

	ret = DDI_SUCCESS;
fail:
	mutex_exit(&pptr->phy_lock);
	return (ret);
}

/*
 * pmcs_destroy_phy_stats
 *
 * Delete the PHY kstat for this iport's primary PHY.
 *
 * NOTE(review): pmcs_create_phy_stats() creates a kstat for every PHY on
 * the iport's phys list, but this only deletes the kstat of iport->pptr.
 * Presumably the remaining kstats are torn down elsewhere -- verify, or
 * this leaks kstats on wide ports.
 */
static void
pmcs_destroy_phy_stats(pmcs_iport_t *iport)
{
	pmcs_phy_t *phyp;

	ASSERT(iport != NULL);
	mutex_enter(&iport->lock);
	phyp = iport->pptr;
	if (phyp == NULL) {
		mutex_exit(&iport->lock);
		return;
	}

	pmcs_lock_phy(phyp);
	if (phyp->phy_stats != NULL) {
		kstat_delete(phyp->phy_stats);
		phyp->phy_stats = NULL;
	}
	pmcs_unlock_phy(phyp);

	mutex_exit(&iport->lock);
}

/*
 * FMA error-callback: post an ereport for the PCI error and report our
 * ability to continue back to the fault management framework.
 */
/*ARGSUSED*/
static int
pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * pmcs_fm_init
 *
 * Register with the I/O Fault Services framework according to the
 * capabilities in pwp->fm_capabilities, switching the register-access
 * and DMA attributes into their error-reporting variants first.
 */
static void
pmcs_fm_init(pmcs_hw_t *pwp)
{
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (pwp->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 */
		ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_setup(pwp->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_register(pwp->dip,
			    pmcs_fm_error_cb, (void *) pwp);
		}
	}
}

/*
 * pmcs_fm_fini
 *
 * Undo pmcs_fm_init(): unregister from I/O Fault Services and restore
 * the default (non-FMA) access and DMA attributes.
 */
static void
pmcs_fm_fini(pmcs_hw_t *pwp)
{
	/* Only unregister FMA capabilities if registered */
	if (pwp->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
3157 */ 3158 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3159 ddi_fm_handler_unregister(pwp->dip); 3160 } 3161 3162 /* 3163 * Release any resources allocated by pci_ereport_setup() 3164 */ 3165 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) || 3166 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3167 pci_ereport_teardown(pwp->dip); 3168 } 3169 3170 /* Unregister from IO Fault Services */ 3171 ddi_fm_fini(pwp->dip); 3172 3173 /* Adjust access and dma attributes for FMA */ 3174 pwp->reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3175 pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3176 pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3177 pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3178 pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3179 } 3180 } 3181 3182 static boolean_t 3183 pmcs_fabricate_wwid(pmcs_hw_t *pwp) 3184 { 3185 char *cp, c; 3186 uint64_t adr; 3187 int i; 3188 3189 cp = &c; 3190 (void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr); 3191 3192 if (adr == 0) { 3193 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 3194 "%s: No serial number available to fabricate WWN", 3195 __func__); 3196 3197 adr = (uint64_t)gethrtime(); 3198 } 3199 3200 adr <<= 8; 3201 adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52); 3202 adr |= (5ULL << 60); 3203 3204 for (i = 0; i < PMCS_MAX_PORTS; i++) { 3205 pwp->sas_wwns[i] = adr + i; 3206 } 3207 3208 return (B_TRUE); 3209 } 3210