/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/scsi/adapters/pmcs/pmcs.h>

#define	PMCS_DRIVER_VERSION	"pmcs HBA device driver"

static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;

/*
 * Non-DDI Compliant stuff
 */
extern char hw_serial[];

/*
 * Global driver data
 *
 * Soft-state anchors for the HBA nodes and the iport child nodes;
 * initialized in _init() and torn down in _fini().
 */
void *pmcs_softc_state = NULL;
void *pmcs_iport_softstate = NULL;

/*
 * Tracing and Logging info
 *
 * One trace buffer is shared by all pmcs instances; it is allocated
 * lazily by the first pmcs_attach() under pmcs_trace_lock.
 */
pmcs_tbuf_t *pmcs_tbuf = NULL;
uint32_t pmcs_tbuf_num_elems = 0;
pmcs_tbuf_t *pmcs_tbuf_ptr;
uint32_t pmcs_tbuf_idx = 0;
boolean_t pmcs_tbuf_wrap = B_FALSE;
static kmutex_t pmcs_trace_lock;

/*
 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
 * will also be sent to system log.
 */
int pmcs_force_syslog = 0;
int pmcs_console = 0;

/*
 * External References
 */
extern int ncpus_online;

/*
 * Local static data
 *
 * Compile-time defaults for the tunables below; each may be overridden
 * per-instance via the corresponding "pmcs-*" driver.conf property read
 * in pmcs_attach().
 */
static int fwlog_level = 3;
static int physpeed = PHY_LINK_ALL;
static int phymode = PHY_LM_AUTO;
static int block_mask = 0;
static int phymap_usec = 3 * MICROSEC;
static int iportmap_usec = 2 * MICROSEC;

#ifdef DEBUG
static int debug_mask = 1;
#else
static int debug_mask = 0;
#endif

#ifdef DISABLE_MSIX
static int disable_msix = 1;
#else
static int disable_msix = 0;
#endif

#ifdef DISABLE_MSI
static int disable_msi = 1;
#else
static int disable_msi = 0;
#endif

static uint16_t maxqdepth = 0xfffe;

/*
 * Local prototypes
 */
static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
static int pmcs_unattach(pmcs_hw_t *);
static int pmcs_iport_unattach(pmcs_iport_t *);
static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
static void pmcs_watchdog(void *);
static int pmcs_setup_intr(pmcs_hw_t *);
static int pmcs_teardown_intr(pmcs_hw_t *);

static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
static uint_t pmcs_general_ix(caddr_t, caddr_t);
static uint_t pmcs_event_ix(caddr_t, caddr_t);
static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
static uint_t pmcs_all_intr(caddr_t, caddr_t);
static int pmcs_quiesce(dev_info_t *dip);
static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);

static void pmcs_create_all_phy_stats(pmcs_iport_t *);
int pmcs_update_phy_stats(kstat_t *, int);
static void pmcs_destroy_phy_stats(pmcs_iport_t *);

static void pmcs_fm_fini(pmcs_hw_t *pwp);
static void pmcs_fm_init(pmcs_hw_t *pwp);
static int pmcs_fm_error_cb(dev_info_t *dip,
    ddi_fm_error_t *err, const void *impl_data);
configuration data 125 */ 126 static struct dev_ops pmcs_ops = { 127 DEVO_REV, /* devo_rev, */ 128 0, /* refcnt */ 129 ddi_no_info, /* info */ 130 nulldev, /* identify */ 131 nulldev, /* probe */ 132 pmcs_attach, /* attach */ 133 pmcs_detach, /* detach */ 134 nodev, /* reset */ 135 NULL, /* driver operations */ 136 NULL, /* bus operations */ 137 ddi_power, /* power management */ 138 pmcs_quiesce /* quiesce */ 139 }; 140 141 static struct modldrv modldrv = { 142 &mod_driverops, 143 PMCS_DRIVER_VERSION, 144 &pmcs_ops, /* driver ops */ 145 }; 146 static struct modlinkage modlinkage = { 147 MODREV_1, &modldrv, NULL 148 }; 149 150 const ddi_dma_attr_t pmcs_dattr = { 151 DMA_ATTR_V0, /* dma_attr version */ 152 0x0000000000000000ull, /* dma_attr_addr_lo */ 153 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 154 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 155 0x0000000000000001ull, /* dma_attr_align */ 156 0x00000078, /* dma_attr_burstsizes */ 157 0x00000001, /* dma_attr_minxfer */ 158 0x00000000FFFFFFFFull, /* dma_attr_maxxfer */ 159 0x00000000FFFFFFFFull, /* dma_attr_seg */ 160 1, /* dma_attr_sgllen */ 161 512, /* dma_attr_granular */ 162 0 /* dma_attr_flags */ 163 }; 164 165 static ddi_device_acc_attr_t rattr = { 166 DDI_DEVICE_ATTR_V1, 167 DDI_STRUCTURE_LE_ACC, 168 DDI_STRICTORDER_ACC, 169 DDI_DEFAULT_ACC 170 }; 171 172 173 /* 174 * Attach/Detach functions 175 */ 176 177 int 178 _init(void) 179 { 180 int ret; 181 182 ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1); 183 if (ret != 0) { 184 cmn_err(CE_WARN, "?soft state init failed for pmcs"); 185 return (ret); 186 } 187 188 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 189 cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs"); 190 ddi_soft_state_fini(&pmcs_softc_state); 191 return (ret); 192 } 193 194 /* 195 * Allocate soft state for iports 196 */ 197 ret = ddi_soft_state_init(&pmcs_iport_softstate, 198 sizeof (pmcs_iport_t), 2); 199 if (ret != 0) { 200 cmn_err(CE_WARN, "?iport soft state init failed 
for pmcs"); 201 ddi_soft_state_fini(&pmcs_softc_state); 202 return (ret); 203 } 204 205 ret = mod_install(&modlinkage); 206 if (ret != 0) { 207 cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret); 208 scsi_hba_fini(&modlinkage); 209 ddi_soft_state_fini(&pmcs_iport_softstate); 210 ddi_soft_state_fini(&pmcs_softc_state); 211 return (ret); 212 } 213 214 /* Initialize the global trace lock */ 215 mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL); 216 217 return (0); 218 } 219 220 int 221 _fini(void) 222 { 223 int ret; 224 if ((ret = mod_remove(&modlinkage)) != 0) { 225 return (ret); 226 } 227 scsi_hba_fini(&modlinkage); 228 229 /* Free pmcs log buffer and destroy the global lock */ 230 if (pmcs_tbuf) { 231 kmem_free(pmcs_tbuf, 232 pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t)); 233 pmcs_tbuf = NULL; 234 } 235 mutex_destroy(&pmcs_trace_lock); 236 237 ddi_soft_state_fini(&pmcs_iport_softstate); 238 ddi_soft_state_fini(&pmcs_softc_state); 239 return (0); 240 } 241 242 int 243 _info(struct modinfo *modinfop) 244 { 245 return (mod_info(&modlinkage, modinfop)); 246 } 247 248 static int 249 pmcs_iport_attach(dev_info_t *dip) 250 { 251 pmcs_iport_t *iport; 252 pmcs_hw_t *pwp; 253 scsi_hba_tran_t *tran; 254 void *ua_priv = NULL; 255 char *iport_ua; 256 char *init_port; 257 int hba_inst; 258 int inst; 259 260 hba_inst = ddi_get_instance(ddi_get_parent(dip)); 261 inst = ddi_get_instance(dip); 262 263 pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst); 264 if (pwp == NULL) { 265 cmn_err(CE_WARN, "%s: No HBA softstate for instance %d", 266 __func__, inst); 267 return (DDI_FAILURE); 268 } 269 270 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) { 271 return (DDI_FAILURE); 272 } 273 274 if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) { 275 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 276 "%s: invoked with NULL unit address, inst (%d)", 277 __func__, inst); 278 return (DDI_FAILURE); 279 } 280 281 if (ddi_soft_state_zalloc(pmcs_iport_softstate, 
inst) != DDI_SUCCESS) { 282 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 283 "Failed to alloc soft state for iport %d", inst); 284 return (DDI_FAILURE); 285 } 286 287 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 288 if (iport == NULL) { 289 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 290 "cannot get iport soft state"); 291 goto iport_attach_fail1; 292 } 293 294 mutex_init(&iport->lock, NULL, MUTEX_DRIVER, 295 DDI_INTR_PRI(pwp->intr_pri)); 296 cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL); 297 cv_init(&iport->smp_cv, NULL, CV_DEFAULT, NULL); 298 mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER, 299 DDI_INTR_PRI(pwp->intr_pri)); 300 mutex_init(&iport->smp_lock, NULL, MUTEX_DRIVER, 301 DDI_INTR_PRI(pwp->intr_pri)); 302 303 /* Set some data on the iport handle */ 304 iport->dip = dip; 305 iport->pwp = pwp; 306 307 /* Dup the UA into the iport handle */ 308 iport->ua = strdup(iport_ua); 309 310 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 311 tran->tran_hba_private = iport; 312 313 list_create(&iport->phys, sizeof (pmcs_phy_t), 314 offsetof(pmcs_phy_t, list_node)); 315 316 /* 317 * If our unit address is active in the phymap, configure our 318 * iport's phylist. 
319 */ 320 mutex_enter(&iport->lock); 321 ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua); 322 if (ua_priv) { 323 /* Non-NULL private data indicates the unit address is active */ 324 iport->ua_state = UA_ACTIVE; 325 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 326 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 327 "%s: failed to " 328 "configure phys on iport handle (0x%p), " 329 " unit address [%s]", __func__, 330 (void *)iport, iport_ua); 331 mutex_exit(&iport->lock); 332 goto iport_attach_fail2; 333 } 334 } else { 335 iport->ua_state = UA_INACTIVE; 336 } 337 mutex_exit(&iport->lock); 338 339 /* Allocate string-based soft state pool for targets */ 340 iport->tgt_sstate = NULL; 341 if (ddi_soft_state_bystr_init(&iport->tgt_sstate, 342 sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) { 343 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 344 "cannot get iport tgt soft state"); 345 goto iport_attach_fail2; 346 } 347 348 /* Create this iport's target map */ 349 if (pmcs_iport_tgtmap_create(iport) == B_FALSE) { 350 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 351 "Failed to create tgtmap on iport %d", inst); 352 goto iport_attach_fail3; 353 } 354 355 /* Set up the 'initiator-port' DDI property on this iport */ 356 init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP); 357 if (pwp->separate_ports) { 358 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 359 "%s: separate ports not supported", __func__); 360 } else { 361 /* Set initiator-port value to the HBA's base WWN */ 362 (void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1, 363 init_port); 364 } 365 366 mutex_enter(&iport->lock); 367 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING, 368 SCSI_ADDR_PROP_INITIATOR_PORT, init_port); 369 kmem_free(init_port, PMCS_MAX_UA_SIZE); 370 371 /* Set up a 'num-phys' DDI property for the iport node */ 372 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 373 &iport->nphy); 374 mutex_exit(&iport->lock); 375 376 /* Create kstats for each of the phys in this port */ 
377 pmcs_create_all_phy_stats(iport); 378 379 /* 380 * Insert this iport handle into our list and set 381 * iports_attached on the HBA node. 382 */ 383 rw_enter(&pwp->iports_lock, RW_WRITER); 384 ASSERT(!list_link_active(&iport->list_node)); 385 list_insert_tail(&pwp->iports, iport); 386 pwp->iports_attached = 1; 387 pwp->num_iports++; 388 rw_exit(&pwp->iports_lock); 389 390 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 391 "iport%d attached", inst); 392 ddi_report_dev(dip); 393 return (DDI_SUCCESS); 394 395 /* teardown and fail */ 396 iport_attach_fail3: 397 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 398 iport_attach_fail2: 399 list_destroy(&iport->phys); 400 strfree(iport->ua); 401 mutex_destroy(&iport->refcnt_lock); 402 mutex_destroy(&iport->smp_lock); 403 cv_destroy(&iport->refcnt_cv); 404 cv_destroy(&iport->smp_cv); 405 mutex_destroy(&iport->lock); 406 iport_attach_fail1: 407 ddi_soft_state_free(pmcs_iport_softstate, inst); 408 return (DDI_FAILURE); 409 } 410 411 static int 412 pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 413 { 414 scsi_hba_tran_t *tran; 415 char chiprev, *fwsupport, hw_rev[24], fw_rev[24]; 416 off_t set3size; 417 int inst, i; 418 int sm_hba = 1; 419 int protocol = 0; 420 int num_phys = 0; 421 pmcs_hw_t *pwp; 422 pmcs_phy_t *phyp; 423 uint32_t num_threads; 424 char buf[64]; 425 char *fwl_file; 426 427 switch (cmd) { 428 case DDI_ATTACH: 429 break; 430 431 case DDI_PM_RESUME: 432 case DDI_RESUME: 433 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 434 if (!tran) { 435 return (DDI_FAILURE); 436 } 437 /* No DDI_?_RESUME on iport nodes */ 438 if (scsi_hba_iport_unit_address(dip) != NULL) { 439 return (DDI_SUCCESS); 440 } 441 pwp = TRAN2PMC(tran); 442 if (pwp == NULL) { 443 return (DDI_FAILURE); 444 } 445 446 mutex_enter(&pwp->lock); 447 pwp->suspended = 0; 448 if (pwp->tq) { 449 ddi_taskq_resume(pwp->tq); 450 } 451 mutex_exit(&pwp->lock); 452 return (DDI_SUCCESS); 453 454 default: 455 return (DDI_FAILURE); 456 } 457 458 /* 459 * 
If this is an iport node, invoke iport attach. 460 */ 461 if (scsi_hba_iport_unit_address(dip) != NULL) { 462 return (pmcs_iport_attach(dip)); 463 } 464 465 /* 466 * From here on is attach for the HBA node 467 */ 468 469 #ifdef DEBUG 470 /* 471 * Check to see if this unit is to be disabled. We can't disable 472 * on a per-iport node. It's either the entire HBA or nothing. 473 */ 474 (void) snprintf(buf, sizeof (buf), 475 "disable-instance-%d", ddi_get_instance(dip)); 476 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 477 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) { 478 cmn_err(CE_NOTE, "pmcs%d: disabled by configuration", 479 ddi_get_instance(dip)); 480 return (DDI_FAILURE); 481 } 482 #endif 483 484 /* 485 * Allocate softstate 486 */ 487 inst = ddi_get_instance(dip); 488 if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) { 489 cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst); 490 return (DDI_FAILURE); 491 } 492 493 pwp = ddi_get_soft_state(pmcs_softc_state, inst); 494 if (pwp == NULL) { 495 cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst); 496 ddi_soft_state_free(pmcs_softc_state, inst); 497 return (DDI_FAILURE); 498 } 499 pwp->dip = dip; 500 STAILQ_INIT(&pwp->dq); 501 STAILQ_INIT(&pwp->cq); 502 STAILQ_INIT(&pwp->wf); 503 STAILQ_INIT(&pwp->pf); 504 505 /* 506 * Create the list for iports and init its lock. 
507 */ 508 list_create(&pwp->iports, sizeof (pmcs_iport_t), 509 offsetof(pmcs_iport_t, list_node)); 510 rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL); 511 512 pwp->state = STATE_PROBING; 513 514 /* 515 * Get driver.conf properties 516 */ 517 pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 518 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask", 519 debug_mask); 520 pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 521 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask", 522 block_mask); 523 pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 524 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed); 525 pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 526 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode); 527 pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 528 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level); 529 if (pwp->fwlog > PMCS_FWLOG_MAX) { 530 pwp->fwlog = PMCS_FWLOG_MAX; 531 } 532 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "pmcs-fwlogfile", 533 &fwl_file) == DDI_SUCCESS)) { 534 if (snprintf(pwp->fwlogfile_aap1, MAXPATHLEN, "%s%d-aap1.0", 535 fwl_file, ddi_get_instance(dip)) > MAXPATHLEN) { 536 pwp->fwlogfile_aap1[0] = '\0'; 537 pwp->fwlogfile_iop[0] = '\0'; 538 } else if (snprintf(pwp->fwlogfile_iop, MAXPATHLEN, 539 "%s%d-iop.0", fwl_file, 540 ddi_get_instance(dip)) > MAXPATHLEN) { 541 pwp->fwlogfile_aap1[0] = '\0'; 542 pwp->fwlogfile_iop[0] = '\0'; 543 } 544 ddi_prop_free(fwl_file); 545 } else { 546 pwp->fwlogfile_aap1[0] = '\0'; 547 pwp->fwlogfile_iop[0] = '\0'; 548 } 549 550 pwp->open_retry_interval = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 551 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-open-retry-interval", 552 OPEN_RETRY_INTERVAL_DEF); 553 if (pwp->open_retry_interval > OPEN_RETRY_INTERVAL_MAX) { 554 pwp->open_retry_interval = OPEN_RETRY_INTERVAL_MAX; 555 } 556 557 mutex_enter(&pmcs_trace_lock); 558 if (pmcs_tbuf == NULL) { 559 /* Allocate trace buffer */ 560 
pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 561 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems", 562 PMCS_TBUF_NUM_ELEMS_DEF); 563 if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) || 564 (pmcs_tbuf_num_elems == 0)) { 565 pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF; 566 } 567 568 pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems * 569 sizeof (pmcs_tbuf_t), KM_SLEEP); 570 pmcs_tbuf_ptr = pmcs_tbuf; 571 pmcs_tbuf_idx = 0; 572 } 573 mutex_exit(&pmcs_trace_lock); 574 575 if (pwp->fwlog && strlen(pwp->fwlogfile_aap1) > 0) { 576 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 577 "%s: firmware event log files: %s, %s", __func__, 578 pwp->fwlogfile_aap1, pwp->fwlogfile_iop); 579 pwp->fwlog_file = 1; 580 } else { 581 if (pwp->fwlog == 0) { 582 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 583 "%s: No firmware event log will be written " 584 "(event log disabled)", __func__); 585 } else { 586 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 587 "%s: No firmware event log will be written " 588 "(no filename configured - too long?)", __func__); 589 } 590 pwp->fwlog_file = 0; 591 } 592 593 disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 594 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix", 595 disable_msix); 596 disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 597 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi", 598 disable_msi); 599 maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 600 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth); 601 pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 602 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0); 603 if (pwp->fw_force_update == 0) { 604 pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 605 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 606 "pmcs-fw-disable-update", 0); 607 } 608 pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 609 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries", 610 PMCS_NQENTRY); 611 612 /* 613 * Initialize FMA 614 */ 615 
pwp->dev_acc_attr = pwp->reg_acc_attr = rattr; 616 pwp->iqp_dma_attr = pwp->oqp_dma_attr = 617 pwp->regdump_dma_attr = pwp->cip_dma_attr = 618 pwp->fwlog_dma_attr = pmcs_dattr; 619 pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip, 620 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable", 621 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 622 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 623 pmcs_fm_init(pwp); 624 625 /* 626 * Map registers 627 */ 628 if (pci_config_setup(dip, &pwp->pci_acc_handle)) { 629 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 630 "pci config setup failed"); 631 ddi_soft_state_free(pmcs_softc_state, inst); 632 return (DDI_FAILURE); 633 } 634 635 /* 636 * Get the size of register set 3. 637 */ 638 if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) { 639 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 640 "unable to get size of register set %d", PMCS_REGSET_3); 641 pci_config_teardown(&pwp->pci_acc_handle); 642 ddi_soft_state_free(pmcs_softc_state, inst); 643 return (DDI_FAILURE); 644 } 645 646 /* 647 * Map registers 648 */ 649 pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 650 651 if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs, 652 0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) { 653 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 654 "failed to map Message Unit registers"); 655 pci_config_teardown(&pwp->pci_acc_handle); 656 ddi_soft_state_free(pmcs_softc_state, inst); 657 return (DDI_FAILURE); 658 } 659 660 if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs, 661 0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) { 662 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 663 "failed to map TOP registers"); 664 ddi_regs_map_free(&pwp->msg_acc_handle); 665 pci_config_teardown(&pwp->pci_acc_handle); 666 ddi_soft_state_free(pmcs_softc_state, inst); 667 return (DDI_FAILURE); 668 } 669 670 if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs, 671 0, 0, &pwp->reg_acc_attr, 
&pwp->gsm_acc_handle)) { 672 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 673 "failed to map GSM registers"); 674 ddi_regs_map_free(&pwp->top_acc_handle); 675 ddi_regs_map_free(&pwp->msg_acc_handle); 676 pci_config_teardown(&pwp->pci_acc_handle); 677 ddi_soft_state_free(pmcs_softc_state, inst); 678 return (DDI_FAILURE); 679 } 680 681 if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs, 682 0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) { 683 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 684 "failed to map MPI registers"); 685 ddi_regs_map_free(&pwp->top_acc_handle); 686 ddi_regs_map_free(&pwp->gsm_acc_handle); 687 ddi_regs_map_free(&pwp->msg_acc_handle); 688 pci_config_teardown(&pwp->pci_acc_handle); 689 ddi_soft_state_free(pmcs_softc_state, inst); 690 return (DDI_FAILURE); 691 } 692 pwp->mpibar = 693 (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size; 694 695 /* 696 * Make sure we can support this card. 697 */ 698 pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION); 699 700 switch (pwp->chiprev) { 701 case PMCS_PM8001_REV_A: 702 case PMCS_PM8001_REV_B: 703 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 704 "Rev A/B Card no longer supported"); 705 goto failure; 706 case PMCS_PM8001_REV_C: 707 break; 708 default: 709 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 710 "Unknown chip revision (%d)", pwp->chiprev); 711 goto failure; 712 } 713 714 /* 715 * Allocate DMA addressable area for Inbound and Outbound Queue indices 716 * that the chip needs to access plus a space for scratch usage 717 */ 718 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 719 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls, 720 &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip, 721 &pwp->ciaddr) == B_FALSE) { 722 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 723 "Failed to setup DMA for index/scratch"); 724 goto failure; 725 } 726 727 bzero(pwp->cip, ptob(1)); 728 pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE]; 729 pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE; 730 
731 /* 732 * Allocate DMA S/G list chunks 733 */ 734 (void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES); 735 736 /* 737 * Allocate a DMA addressable area for the firmware log (if needed) 738 */ 739 if (pwp->fwlog) { 740 /* 741 * Align to event log header and entry size 742 */ 743 pwp->fwlog_dma_attr.dma_attr_align = 32; 744 if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr, 745 &pwp->fwlog_acchdl, 746 &pwp->fwlog_hndl, PMCS_FWLOG_SIZE, 747 (caddr_t *)&pwp->fwlogp, 748 &pwp->fwaddr) == B_FALSE) { 749 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 750 "Failed to setup DMA for fwlog area"); 751 pwp->fwlog = 0; 752 } else { 753 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 754 pwp->fwlogp_aap1 = (pmcs_fw_event_hdr_t *)pwp->fwlogp; 755 pwp->fwlogp_iop = (pmcs_fw_event_hdr_t *)((void *) 756 ((caddr_t)pwp->fwlogp + (PMCS_FWLOG_SIZE / 2))); 757 } 758 } 759 760 if (pwp->flash_chunk_addr == NULL) { 761 pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE; 762 if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr, 763 &pwp->regdump_acchdl, 764 &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE, 765 (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) == 766 B_FALSE) { 767 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 768 "Failed to setup DMA for register dump area"); 769 goto failure; 770 } 771 bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE); 772 } 773 774 /* 775 * More bits of local initialization... 776 */ 777 pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0); 778 if (pwp->tq == NULL) { 779 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 780 "unable to create worker taskq"); 781 goto failure; 782 } 783 784 /* 785 * Cache of structures for dealing with I/O completion callbacks. 
786 */ 787 (void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst); 788 pwp->iocomp_cb_cache = kmem_cache_create(buf, 789 sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0); 790 791 /* 792 * Cache of PHY structures 793 */ 794 (void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst); 795 pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8, 796 pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp, 797 NULL, 0); 798 799 /* 800 * Allocate space for the I/O completion threads 801 */ 802 num_threads = ncpus_online; 803 if (num_threads > PMCS_MAX_CQ_THREADS) { 804 num_threads = PMCS_MAX_CQ_THREADS; 805 } 806 807 pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) * 808 num_threads, KM_SLEEP); 809 pwp->cq_info.cq_threads = num_threads; 810 pwp->cq_info.cq_next_disp_thr = 0; 811 pwp->cq_info.cq_stop = B_FALSE; 812 813 /* 814 * Set the quantum value in clock ticks for the I/O interrupt 815 * coalescing timer. 816 */ 817 pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS); 818 819 /* 820 * We have a delicate dance here. We need to set up 821 * interrupts so we know how to set up some OQC 822 * tables. However, while we're setting up table 823 * access, we may need to flash new firmware and 824 * reset the card, which will take some finessing. 825 */ 826 827 /* 828 * Set up interrupts here. 829 */ 830 switch (pmcs_setup_intr(pwp)) { 831 case 0: 832 break; 833 case EIO: 834 pwp->stuck = 1; 835 /* FALLTHROUGH */ 836 default: 837 goto failure; 838 } 839 840 /* 841 * Set these up now becuase they are used to initialize the OQC tables. 842 * 843 * If we have MSI or MSI-X interrupts set up and we have enough 844 * vectors for each OQ, the Outbound Queue vectors can all be the 845 * same as the appropriate interrupt routine will have been called 846 * and the doorbell register automatically cleared. 
847 * This keeps us from having to check the Outbound Doorbell register 848 * when the routines for these interrupts are called. 849 * 850 * If we have Legacy INT-X interrupts set up or we didn't have enough 851 * MSI/MSI-X vectors to uniquely identify each OQ, we point these 852 * vectors to the bits we would like to have set in the Outbound 853 * Doorbell register because pmcs_all_intr will read the doorbell 854 * register to find out why we have an interrupt and write the 855 * corresponding 'clear' bit for that interrupt. 856 */ 857 858 switch (pwp->intr_cnt) { 859 case 1: 860 /* 861 * Only one vector, so we must check all OQs for MSI. For 862 * INT-X, there's only one vector anyway, so we can just 863 * use the outbound queue bits to keep from having to 864 * check each queue for each interrupt. 865 */ 866 if (pwp->int_type == PMCS_INT_FIXED) { 867 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 868 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 869 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 870 } else { 871 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 872 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE; 873 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE; 874 } 875 break; 876 case 2: 877 /* With 2, we can at least isolate IODONE */ 878 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 879 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 880 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL; 881 break; 882 case 4: 883 /* With 4 vectors, everybody gets one */ 884 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE; 885 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL; 886 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS; 887 break; 888 } 889 890 /* 891 * Do the first part of setup 892 */ 893 if (pmcs_setup(pwp)) { 894 goto failure; 895 } 896 pmcs_report_fwversion(pwp); 897 898 /* 899 * Now do some additonal allocations based upon information 900 * gathered during MPI setup. 
901 */ 902 pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP); 903 ASSERT(pwp->nphy < SAS2_PHYNUM_MAX); 904 phyp = pwp->root_phys; 905 for (i = 0; i < pwp->nphy; i++) { 906 if (i < pwp->nphy-1) { 907 phyp->sibling = (phyp + 1); 908 } 909 mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER, 910 DDI_INTR_PRI(pwp->intr_pri)); 911 phyp->phynum = i & SAS2_PHYNUM_MASK; 912 pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path)); 913 phyp->pwp = pwp; 914 phyp->device_id = PMCS_INVALID_DEVICE_ID; 915 phyp->portid = PMCS_PHY_INVALID_PORT_ID; 916 phyp++; 917 } 918 919 pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP); 920 for (i = 0; i < pwp->max_cmd - 1; i++) { 921 pmcwork_t *pwrk = &pwp->work[i]; 922 mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER, 923 DDI_INTR_PRI(pwp->intr_pri)); 924 cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL); 925 STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next); 926 927 } 928 pwp->targets = (pmcs_xscsi_t **) 929 kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP); 930 931 pwp->iqpt = (pmcs_iqp_trace_t *) 932 kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP); 933 pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP); 934 pwp->iqpt->curpos = pwp->iqpt->head; 935 pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE; 936 937 /* 938 * Start MPI communication. 939 */ 940 if (pmcs_start_mpi(pwp)) { 941 if (pmcs_soft_reset(pwp, B_FALSE)) { 942 goto failure; 943 } 944 pwp->last_reset_reason = PMCS_LAST_RST_ATTACH; 945 } 946 947 /* 948 * Do some initial acceptance tests. 949 * This tests interrupts and queues. 950 */ 951 if (pmcs_echo_test(pwp)) { 952 goto failure; 953 } 954 955 /* Read VPD - if it exists */ 956 if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) { 957 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 958 "%s: Unable to read VPD: " 959 "attempting to fabricate", __func__); 960 /* 961 * When we release, this must goto failure and the call 962 * to pmcs_fabricate_wwid is removed. 
963 */ 964 /* goto failure; */ 965 if (!pmcs_fabricate_wwid(pwp)) { 966 goto failure; 967 } 968 } 969 970 /* 971 * We're now officially running 972 */ 973 pwp->state = STATE_RUNNING; 974 975 /* 976 * Check firmware versions and load new firmware 977 * if needed and reset. 978 */ 979 if (pmcs_firmware_update(pwp)) { 980 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 981 "%s: Firmware update failed", __func__); 982 goto failure; 983 } 984 985 /* 986 * Create completion threads. 987 */ 988 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 989 pwp->cq_info.cq_thr_info[i].cq_pwp = pwp; 990 pwp->cq_info.cq_thr_info[i].cq_thread = 991 thread_create(NULL, 0, pmcs_scsa_cq_run, 992 &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri); 993 } 994 995 /* 996 * Create one thread to deal with the updating of the interrupt 997 * coalescing timer. 998 */ 999 pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal, 1000 pwp, 0, &p0, TS_RUN, minclsyspri); 1001 1002 /* 1003 * Kick off the watchdog 1004 */ 1005 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 1006 drv_usectohz(PMCS_WATCH_INTERVAL)); 1007 /* 1008 * Do the SCSI attachment code (before starting phys) 1009 */ 1010 if (pmcs_scsa_init(pwp, &pmcs_dattr)) { 1011 goto failure; 1012 } 1013 pwp->hba_attached = 1; 1014 1015 /* Check all acc & dma handles allocated in attach */ 1016 if (pmcs_check_acc_dma_handle(pwp)) { 1017 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 1018 goto failure; 1019 } 1020 1021 /* 1022 * Create the phymap for this HBA instance 1023 */ 1024 if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL, 1025 pwp, pmcs_phymap_activate, pmcs_phymap_deactivate, 1026 &pwp->hss_phymap) != DDI_SUCCESS) { 1027 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1028 "%s: pmcs%d phymap_create failed", __func__, inst); 1029 goto failure; 1030 } 1031 ASSERT(pwp->hss_phymap); 1032 1033 /* 1034 * Create the iportmap for this HBA instance 1035 */ 1036 if (scsi_hba_iportmap_create(dip, iportmap_usec, 1037 
&pwp->hss_iportmap) != DDI_SUCCESS) { 1038 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1039 "%s: pmcs%d iportmap_create failed", __func__, inst); 1040 goto failure; 1041 } 1042 ASSERT(pwp->hss_iportmap); 1043 1044 /* 1045 * Start the PHYs. 1046 */ 1047 if (pmcs_start_phys(pwp)) { 1048 goto failure; 1049 } 1050 1051 /* 1052 * From this point on, we can't fail. 1053 */ 1054 ddi_report_dev(dip); 1055 1056 /* SM-HBA */ 1057 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED, 1058 &sm_hba); 1059 1060 /* SM-HBA */ 1061 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION, 1062 pmcs_driver_rev); 1063 1064 /* SM-HBA */ 1065 chiprev = 'A' + pwp->chiprev; 1066 (void) snprintf(hw_rev, 2, "%s", &chiprev); 1067 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION, 1068 hw_rev); 1069 1070 /* SM-HBA */ 1071 switch (PMCS_FW_TYPE(pwp)) { 1072 case PMCS_FW_TYPE_RELEASED: 1073 fwsupport = "Released"; 1074 break; 1075 case PMCS_FW_TYPE_DEVELOPMENT: 1076 fwsupport = "Development"; 1077 break; 1078 case PMCS_FW_TYPE_ALPHA: 1079 fwsupport = "Alpha"; 1080 break; 1081 case PMCS_FW_TYPE_BETA: 1082 fwsupport = "Beta"; 1083 break; 1084 default: 1085 fwsupport = "Special"; 1086 break; 1087 } 1088 (void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s", 1089 PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp), 1090 fwsupport); 1091 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION, 1092 fw_rev); 1093 1094 /* SM-HBA */ 1095 num_phys = pwp->nphy; 1096 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA, 1097 &num_phys); 1098 1099 /* SM-HBA */ 1100 protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT; 1101 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL, 1102 &protocol); 1103 1104 /* Receptacle properties (FMA) */ 1105 pwp->recept_labels[0] = PMCS_RECEPT_LABEL_0; 1106 pwp->recept_pm[0] = PMCS_RECEPT_PM_0; 1107 pwp->recept_labels[1] = PMCS_RECEPT_LABEL_1; 1108 pwp->recept_pm[1] = 
PMCS_RECEPT_PM_1; 1109 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 1110 SCSI_HBA_PROP_RECEPTACLE_LABEL, &pwp->recept_labels[0], 1111 PMCS_NUM_RECEPTACLES) != DDI_PROP_SUCCESS) { 1112 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1113 "%s: failed to create %s property", __func__, 1114 "receptacle-label"); 1115 } 1116 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip, 1117 SCSI_HBA_PROP_RECEPTACLE_PM, &pwp->recept_pm[0], 1118 PMCS_NUM_RECEPTACLES) != DDI_PROP_SUCCESS) { 1119 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1120 "%s: failed to create %s property", __func__, 1121 "receptacle-pm"); 1122 } 1123 1124 return (DDI_SUCCESS); 1125 1126 failure: 1127 if (pmcs_unattach(pwp)) { 1128 pwp->stuck = 1; 1129 } 1130 return (DDI_FAILURE); 1131 } 1132 1133 int 1134 pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1135 { 1136 int inst = ddi_get_instance(dip); 1137 pmcs_iport_t *iport = NULL; 1138 pmcs_hw_t *pwp = NULL; 1139 scsi_hba_tran_t *tran; 1140 1141 if (scsi_hba_iport_unit_address(dip) != NULL) { 1142 /* iport node */ 1143 iport = ddi_get_soft_state(pmcs_iport_softstate, inst); 1144 ASSERT(iport); 1145 if (iport == NULL) { 1146 return (DDI_FAILURE); 1147 } 1148 pwp = iport->pwp; 1149 } else { 1150 /* hba node */ 1151 pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst); 1152 ASSERT(pwp); 1153 if (pwp == NULL) { 1154 return (DDI_FAILURE); 1155 } 1156 } 1157 1158 switch (cmd) { 1159 case DDI_DETACH: 1160 if (iport) { 1161 /* iport detach */ 1162 if (pmcs_iport_unattach(iport)) { 1163 return (DDI_FAILURE); 1164 } 1165 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1166 "iport%d detached", inst); 1167 return (DDI_SUCCESS); 1168 } else { 1169 /* HBA detach */ 1170 if (pmcs_unattach(pwp)) { 1171 return (DDI_FAILURE); 1172 } 1173 return (DDI_SUCCESS); 1174 } 1175 1176 case DDI_SUSPEND: 1177 case DDI_PM_SUSPEND: 1178 /* No DDI_SUSPEND on iport nodes */ 1179 if (iport) { 1180 return (DDI_SUCCESS); 1181 } 1182 1183 if (pwp->stuck) { 1184 return (DDI_FAILURE); 1185 } 
1186 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 1187 if (!tran) { 1188 return (DDI_FAILURE); 1189 } 1190 1191 pwp = TRAN2PMC(tran); 1192 if (pwp == NULL) { 1193 return (DDI_FAILURE); 1194 } 1195 mutex_enter(&pwp->lock); 1196 if (pwp->tq) { 1197 ddi_taskq_suspend(pwp->tq); 1198 } 1199 pwp->suspended = 1; 1200 mutex_exit(&pwp->lock); 1201 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending"); 1202 return (DDI_SUCCESS); 1203 1204 default: 1205 return (DDI_FAILURE); 1206 } 1207 } 1208 1209 static int 1210 pmcs_iport_unattach(pmcs_iport_t *iport) 1211 { 1212 pmcs_hw_t *pwp = iport->pwp; 1213 1214 /* 1215 * First, check if there are still any configured targets on this 1216 * iport. If so, we fail detach. 1217 */ 1218 if (pmcs_iport_has_targets(pwp, iport)) { 1219 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 1220 "iport%d detach failure: iport has targets (luns)", 1221 ddi_get_instance(iport->dip)); 1222 return (DDI_FAILURE); 1223 } 1224 1225 /* 1226 * Remove this iport from our list if it is inactive in the phymap. 1227 */ 1228 rw_enter(&pwp->iports_lock, RW_WRITER); 1229 mutex_enter(&iport->lock); 1230 1231 if (iport->ua_state == UA_ACTIVE) { 1232 mutex_exit(&iport->lock); 1233 rw_exit(&pwp->iports_lock); 1234 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 1235 "iport%d detach failure: " 1236 "iport unit address active in phymap", 1237 ddi_get_instance(iport->dip)); 1238 return (DDI_FAILURE); 1239 } 1240 1241 /* If it's our only iport, clear iports_attached */ 1242 ASSERT(pwp->num_iports >= 1); 1243 if (--pwp->num_iports == 0) { 1244 pwp->iports_attached = 0; 1245 } 1246 1247 ASSERT(list_link_active(&iport->list_node)); 1248 list_remove(&pwp->iports, iport); 1249 rw_exit(&pwp->iports_lock); 1250 1251 /* 1252 * We have removed the iport handle from the HBA's iports list, 1253 * there will be no new references to it. Two things must be 1254 * guarded against here. 
First, we could have PHY up events, 1255 * adding themselves to the iport->phys list and grabbing ref's 1256 * on our iport handle. Second, we could have existing references 1257 * to this iport handle from a point in time prior to the list 1258 * removal above. 1259 * 1260 * So first, destroy the phys list. Remove any phys that have snuck 1261 * in after the phymap deactivate, dropping the refcnt accordingly. 1262 * If these PHYs are still up if and when the phymap reactivates 1263 * (i.e. when this iport reattaches), we'll populate the list with 1264 * them and bump the refcnt back up. 1265 */ 1266 pmcs_remove_phy_from_iport(iport, NULL); 1267 ASSERT(list_is_empty(&iport->phys)); 1268 list_destroy(&iport->phys); 1269 mutex_exit(&iport->lock); 1270 1271 /* 1272 * Second, wait for any other references to this iport to be 1273 * dropped, then continue teardown. 1274 */ 1275 mutex_enter(&iport->refcnt_lock); 1276 while (iport->refcnt != 0) { 1277 cv_wait(&iport->refcnt_cv, &iport->refcnt_lock); 1278 } 1279 mutex_exit(&iport->refcnt_lock); 1280 1281 /* Delete kstats */ 1282 pmcs_destroy_phy_stats(iport); 1283 1284 /* Destroy the iport target map */ 1285 if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) { 1286 return (DDI_FAILURE); 1287 } 1288 1289 /* Free the tgt soft state */ 1290 if (iport->tgt_sstate != NULL) { 1291 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 1292 } 1293 1294 /* Free our unit address string */ 1295 strfree(iport->ua); 1296 1297 /* Finish teardown and free the softstate */ 1298 mutex_destroy(&iport->refcnt_lock); 1299 mutex_destroy(&iport->smp_lock); 1300 ASSERT(iport->refcnt == 0); 1301 cv_destroy(&iport->refcnt_cv); 1302 cv_destroy(&iport->smp_cv); 1303 mutex_destroy(&iport->lock); 1304 ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip)); 1305 1306 return (DDI_SUCCESS); 1307 } 1308 1309 static int 1310 pmcs_unattach(pmcs_hw_t *pwp) 1311 { 1312 int i; 1313 enum pwpstate curstate; 1314 pmcs_cq_thr_info_t *cqti; 1315 1316 /* 
1317 * Tear down the interrupt infrastructure. 1318 */ 1319 if (pmcs_teardown_intr(pwp)) { 1320 pwp->stuck = 1; 1321 } 1322 pwp->intr_cnt = 0; 1323 1324 /* 1325 * Grab a lock, if initted, to set state. 1326 */ 1327 if (pwp->locks_initted) { 1328 mutex_enter(&pwp->lock); 1329 if (pwp->state != STATE_DEAD) { 1330 pwp->state = STATE_UNPROBING; 1331 } 1332 curstate = pwp->state; 1333 mutex_exit(&pwp->lock); 1334 1335 /* 1336 * Stop the I/O completion threads. 1337 */ 1338 mutex_enter(&pwp->cq_lock); 1339 pwp->cq_info.cq_stop = B_TRUE; 1340 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1341 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1342 cqti = &pwp->cq_info.cq_thr_info[i]; 1343 mutex_enter(&cqti->cq_thr_lock); 1344 cv_signal(&cqti->cq_cv); 1345 mutex_exit(&cqti->cq_thr_lock); 1346 mutex_exit(&pwp->cq_lock); 1347 thread_join(cqti->cq_thread->t_did); 1348 mutex_enter(&pwp->cq_lock); 1349 } 1350 } 1351 mutex_exit(&pwp->cq_lock); 1352 1353 /* 1354 * Stop the interrupt coalescing timer thread 1355 */ 1356 if (pwp->ict_thread) { 1357 mutex_enter(&pwp->ict_lock); 1358 pwp->io_intr_coal.stop_thread = B_TRUE; 1359 cv_signal(&pwp->ict_cv); 1360 mutex_exit(&pwp->ict_lock); 1361 thread_join(pwp->ict_thread->t_did); 1362 } 1363 } else { 1364 if (pwp->state != STATE_DEAD) { 1365 pwp->state = STATE_UNPROBING; 1366 } 1367 curstate = pwp->state; 1368 } 1369 1370 /* 1371 * Make sure that any pending watchdog won't 1372 * be called from this point on out. 1373 */ 1374 (void) untimeout(pwp->wdhandle); 1375 /* 1376 * After the above action, the watchdog 1377 * timer that starts up the worker task 1378 * may trigger but will exit immediately 1379 * on triggering. 1380 * 1381 * Now that this is done, we can destroy 1382 * the task queue, which will wait if we're 1383 * running something on it. 
1384 */ 1385 if (pwp->tq) { 1386 ddi_taskq_destroy(pwp->tq); 1387 pwp->tq = NULL; 1388 } 1389 1390 pmcs_fm_fini(pwp); 1391 1392 if (pwp->hba_attached) { 1393 (void) scsi_hba_detach(pwp->dip); 1394 pwp->hba_attached = 0; 1395 } 1396 1397 /* 1398 * If the chip hasn't been marked dead, shut it down now 1399 * to bring it back to a known state without attempting 1400 * a soft reset. 1401 */ 1402 if (curstate != STATE_DEAD && pwp->locks_initted) { 1403 /* 1404 * De-register all registered devices 1405 */ 1406 pmcs_deregister_devices(pwp, pwp->root_phys); 1407 1408 /* 1409 * Stop all the phys. 1410 */ 1411 pmcs_stop_phys(pwp); 1412 1413 /* 1414 * Shut Down Message Passing 1415 */ 1416 (void) pmcs_stop_mpi(pwp); 1417 1418 /* 1419 * Reset chip 1420 */ 1421 (void) pmcs_soft_reset(pwp, B_FALSE); 1422 pwp->last_reset_reason = PMCS_LAST_RST_DETACH; 1423 } 1424 1425 /* 1426 * Turn off interrupts on the chip 1427 */ 1428 if (pwp->mpi_acc_handle) { 1429 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1430 } 1431 1432 if (pwp->hss_iportmap != NULL) { 1433 /* Destroy the iportmap */ 1434 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1435 } 1436 1437 if (pwp->hss_phymap != NULL) { 1438 /* Destroy the phymap */ 1439 sas_phymap_destroy(pwp->hss_phymap); 1440 } 1441 1442 /* Destroy the iports lock and list */ 1443 rw_destroy(&pwp->iports_lock); 1444 ASSERT(list_is_empty(&pwp->iports)); 1445 list_destroy(&pwp->iports); 1446 1447 /* Destroy pwp's lock */ 1448 if (pwp->locks_initted) { 1449 mutex_destroy(&pwp->lock); 1450 mutex_destroy(&pwp->dma_lock); 1451 mutex_destroy(&pwp->axil_lock); 1452 mutex_destroy(&pwp->cq_lock); 1453 mutex_destroy(&pwp->config_lock); 1454 mutex_destroy(&pwp->ict_lock); 1455 mutex_destroy(&pwp->wfree_lock); 1456 mutex_destroy(&pwp->pfree_lock); 1457 mutex_destroy(&pwp->dead_phylist_lock); 1458 #ifdef DEBUG 1459 mutex_destroy(&pwp->dbglock); 1460 #endif 1461 cv_destroy(&pwp->config_cv); 1462 cv_destroy(&pwp->ict_cv); 1463 cv_destroy(&pwp->drain_cv); 1464 
pwp->locks_initted = 0; 1465 } 1466 1467 /* 1468 * Free DMA handles and associated consistent memory 1469 */ 1470 if (pwp->regdump_hndl) { 1471 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1472 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1473 "Condition check failed " 1474 "at %s():%d", __func__, __LINE__); 1475 } 1476 ddi_dma_free_handle(&pwp->regdump_hndl); 1477 ddi_dma_mem_free(&pwp->regdump_acchdl); 1478 pwp->regdump_hndl = 0; 1479 } 1480 if (pwp->fwlog_hndl) { 1481 if (ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1482 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1483 "Condition check failed " 1484 "at %s():%d", __func__, __LINE__); 1485 } 1486 ddi_dma_free_handle(&pwp->fwlog_hndl); 1487 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1488 pwp->fwlog_hndl = 0; 1489 } 1490 if (pwp->cip_handles) { 1491 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1492 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1493 "Condition check failed " 1494 "at %s():%d", __func__, __LINE__); 1495 } 1496 ddi_dma_free_handle(&pwp->cip_handles); 1497 ddi_dma_mem_free(&pwp->cip_acchdls); 1498 pwp->cip_handles = 0; 1499 } 1500 for (i = 0; i < PMCS_NOQ; i++) { 1501 if (pwp->oqp_handles[i]) { 1502 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1503 DDI_SUCCESS) { 1504 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1505 "Condition check failed at %s():%d", 1506 __func__, __LINE__); 1507 } 1508 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1509 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1510 pwp->oqp_handles[i] = 0; 1511 } 1512 } 1513 for (i = 0; i < PMCS_NIQ; i++) { 1514 if (pwp->iqp_handles[i]) { 1515 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1516 DDI_SUCCESS) { 1517 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1518 "Condition check failed at %s():%d", 1519 __func__, __LINE__); 1520 } 1521 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1522 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1523 pwp->iqp_handles[i] = 0; 1524 } 1525 } 1526 1527 pmcs_free_dma_chunklist(pwp); 1528 1529 /* 
1530 * Unmap registers and destroy access handles 1531 */ 1532 if (pwp->mpi_acc_handle) { 1533 ddi_regs_map_free(&pwp->mpi_acc_handle); 1534 pwp->mpi_acc_handle = 0; 1535 } 1536 if (pwp->top_acc_handle) { 1537 ddi_regs_map_free(&pwp->top_acc_handle); 1538 pwp->top_acc_handle = 0; 1539 } 1540 if (pwp->gsm_acc_handle) { 1541 ddi_regs_map_free(&pwp->gsm_acc_handle); 1542 pwp->gsm_acc_handle = 0; 1543 } 1544 if (pwp->msg_acc_handle) { 1545 ddi_regs_map_free(&pwp->msg_acc_handle); 1546 pwp->msg_acc_handle = 0; 1547 } 1548 if (pwp->pci_acc_handle) { 1549 pci_config_teardown(&pwp->pci_acc_handle); 1550 pwp->pci_acc_handle = 0; 1551 } 1552 1553 /* 1554 * Do memory allocation cleanup. 1555 */ 1556 while (pwp->dma_freelist) { 1557 pmcs_dmachunk_t *this = pwp->dma_freelist; 1558 pwp->dma_freelist = this->nxt; 1559 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1560 } 1561 1562 /* 1563 * Free pools 1564 */ 1565 if (pwp->iocomp_cb_cache) { 1566 kmem_cache_destroy(pwp->iocomp_cb_cache); 1567 } 1568 1569 /* 1570 * Free all PHYs (at level > 0), then free the cache 1571 */ 1572 pmcs_free_all_phys(pwp, pwp->root_phys); 1573 if (pwp->phy_cache) { 1574 kmem_cache_destroy(pwp->phy_cache); 1575 } 1576 1577 /* 1578 * Free root PHYs 1579 */ 1580 if (pwp->root_phys) { 1581 pmcs_phy_t *phyp = pwp->root_phys; 1582 for (i = 0; i < pwp->nphy; i++) { 1583 mutex_destroy(&phyp->phy_lock); 1584 phyp = phyp->sibling; 1585 } 1586 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1587 pwp->root_phys = NULL; 1588 pwp->nphy = 0; 1589 } 1590 1591 /* Free the targets list */ 1592 if (pwp->targets) { 1593 kmem_free(pwp->targets, 1594 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1595 } 1596 1597 /* 1598 * Free work structures 1599 */ 1600 1601 if (pwp->work && pwp->max_cmd) { 1602 for (i = 0; i < pwp->max_cmd - 1; i++) { 1603 pmcwork_t *pwrk = &pwp->work[i]; 1604 mutex_destroy(&pwrk->lock); 1605 cv_destroy(&pwrk->sleep_cv); 1606 } 1607 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1608 
pwp->work = NULL; 1609 pwp->max_cmd = 0; 1610 } 1611 1612 /* 1613 * Do last property and SCSA cleanup 1614 */ 1615 if (pwp->tran) { 1616 scsi_hba_tran_free(pwp->tran); 1617 pwp->tran = NULL; 1618 } 1619 if (pwp->reset_notify_listf) { 1620 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1621 pwp->reset_notify_listf = NULL; 1622 } 1623 ddi_prop_remove_all(pwp->dip); 1624 if (pwp->stuck) { 1625 return (-1); 1626 } 1627 1628 /* Free register dump area if allocated */ 1629 if (pwp->regdumpp) { 1630 kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE); 1631 pwp->regdumpp = NULL; 1632 } 1633 if (pwp->iqpt && pwp->iqpt->head) { 1634 kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE); 1635 pwp->iqpt->head = pwp->iqpt->curpos = NULL; 1636 } 1637 if (pwp->iqpt) { 1638 kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t)); 1639 pwp->iqpt = NULL; 1640 } 1641 1642 ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip)); 1643 return (0); 1644 } 1645 1646 /* 1647 * quiesce (9E) entry point 1648 * 1649 * This function is called when the system is single-threaded at high PIL 1650 * with preemption disabled. Therefore, the function must not block/wait/sleep. 1651 * 1652 * Returns DDI_SUCCESS or DDI_FAILURE. 1653 * 1654 */ 1655 static int 1656 pmcs_quiesce(dev_info_t *dip) 1657 { 1658 pmcs_hw_t *pwp; 1659 scsi_hba_tran_t *tran; 1660 1661 if ((tran = ddi_get_driver_private(dip)) == NULL) 1662 return (DDI_SUCCESS); 1663 1664 /* No quiesce necessary on a per-iport basis */ 1665 if (scsi_hba_iport_unit_address(dip) != NULL) { 1666 return (DDI_SUCCESS); 1667 } 1668 1669 if ((pwp = TRAN2PMC(tran)) == NULL) 1670 return (DDI_SUCCESS); 1671 1672 /* Stop MPI & Reset chip (no need to re-initialize) */ 1673 (void) pmcs_stop_mpi(pwp); 1674 (void) pmcs_soft_reset(pwp, B_TRUE); 1675 pwp->last_reset_reason = PMCS_LAST_RST_QUIESCE; 1676 1677 return (DDI_SUCCESS); 1678 } 1679 1680 /* 1681 * Called with xp->statlock and PHY lock and scratch acquired. 
1682 */ 1683 static int 1684 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp) 1685 { 1686 ata_identify_t *ati; 1687 int result, i; 1688 pmcs_phy_t *pptr; 1689 uint16_t *a; 1690 union { 1691 uint8_t nsa[8]; 1692 uint16_t nsb[4]; 1693 } u; 1694 1695 /* 1696 * Safe defaults - use only if this target is brand new (i.e. doesn't 1697 * already have these settings configured) 1698 */ 1699 if (xp->capacity == 0) { 1700 xp->capacity = (uint64_t)-1; 1701 xp->ca = 1; 1702 xp->qdepth = 1; 1703 xp->pio = 1; 1704 } 1705 1706 pptr = xp->phy; 1707 1708 /* 1709 * We only try and issue an IDENTIFY for first level 1710 * (direct attached) devices. We don't try and 1711 * set other quirks here (this will happen later, 1712 * if the device is fully configured) 1713 */ 1714 if (pptr->level) { 1715 return (0); 1716 } 1717 1718 mutex_exit(&xp->statlock); 1719 result = pmcs_sata_identify(pwp, pptr); 1720 mutex_enter(&xp->statlock); 1721 1722 if (result) { 1723 return (result); 1724 } 1725 ati = pwp->scratch; 1726 a = &ati->word108; 1727 for (i = 0; i < 4; i++) { 1728 u.nsb[i] = ddi_swap16(*a++); 1729 } 1730 1731 /* 1732 * Check the returned data for being a valid (NAA=5) WWN. 1733 * If so, use that and override the SAS address we were 1734 * given at Link Up time. 
1735 */ 1736 if ((u.nsa[0] >> 4) == 5) { 1737 (void) memcpy(pptr->sas_address, u.nsa, 8); 1738 } 1739 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 1740 "%s: %s has SAS ADDRESS " SAS_ADDR_FMT, 1741 __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address)); 1742 return (0); 1743 } 1744 1745 /* 1746 * Called with PHY lock and target statlock held and scratch acquired 1747 */ 1748 static boolean_t 1749 pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target) 1750 { 1751 ASSERT(target != NULL); 1752 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p", 1753 __func__, (void *) target); 1754 1755 switch (target->phy->dtype) { 1756 case SATA: 1757 if (pmcs_add_sata_device(pwp, target) != 0) { 1758 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy, 1759 target, "%s: add_sata_device failed for tgt 0x%p", 1760 __func__, (void *) target); 1761 return (B_FALSE); 1762 } 1763 break; 1764 case SAS: 1765 target->qdepth = maxqdepth; 1766 break; 1767 case EXPANDER: 1768 target->qdepth = 1; 1769 break; 1770 } 1771 1772 target->new = 0; 1773 target->assigned = 1; 1774 target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL; 1775 target->dtype = target->phy->dtype; 1776 1777 /* 1778 * Set the PHY's config stop time to 0. This is one of the final 1779 * stops along the config path, so we're indicating that we 1780 * successfully configured the PHY. 
1781 */ 1782 target->phy->config_stop = 0; 1783 1784 return (B_TRUE); 1785 } 1786 1787 void 1788 pmcs_worker(void *arg) 1789 { 1790 pmcs_hw_t *pwp = arg; 1791 ulong_t work_flags; 1792 1793 DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t, 1794 pwp->config_changed); 1795 1796 if (pwp->state != STATE_RUNNING) { 1797 return; 1798 } 1799 1800 work_flags = atomic_swap_ulong(&pwp->work_flags, 0); 1801 1802 if (work_flags & PMCS_WORK_FLAG_DUMP_REGS) { 1803 mutex_enter(&pwp->lock); 1804 pmcs_register_dump_int(pwp); 1805 mutex_exit(&pwp->lock); 1806 } 1807 1808 if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) { 1809 pmcs_ack_events(pwp); 1810 } 1811 1812 if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) { 1813 mutex_enter(&pwp->lock); 1814 pmcs_spinup_release(pwp, NULL); 1815 mutex_exit(&pwp->lock); 1816 } 1817 1818 if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) { 1819 pmcs_ssp_event_recovery(pwp); 1820 } 1821 1822 if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) { 1823 pmcs_dev_state_recovery(pwp, NULL); 1824 } 1825 1826 if (work_flags & PMCS_WORK_FLAG_DEREGISTER_DEV) { 1827 pmcs_deregister_device_work(pwp, NULL); 1828 } 1829 1830 if (work_flags & PMCS_WORK_FLAG_DISCOVER) { 1831 pmcs_discover(pwp); 1832 } 1833 1834 if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) { 1835 if (pmcs_abort_handler(pwp)) { 1836 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 1837 } 1838 } 1839 1840 if (work_flags & PMCS_WORK_FLAG_SATA_RUN) { 1841 pmcs_sata_work(pwp); 1842 } 1843 1844 if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) { 1845 pmcs_scsa_wq_run(pwp); 1846 mutex_enter(&pwp->lock); 1847 PMCS_CQ_RUN(pwp); 1848 mutex_exit(&pwp->lock); 1849 } 1850 1851 if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) { 1852 if (pmcs_add_more_chunks(pwp, 1853 ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) { 1854 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 1855 } else { 1856 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1857 } 1858 } 1859 } 1860 1861 static int 1862 pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize) 
1863 { 1864 pmcs_dmachunk_t *dc; 1865 unsigned long dl; 1866 pmcs_chunk_t *pchunk = NULL; 1867 1868 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 1869 1870 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP); 1871 if (pchunk == NULL) { 1872 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1873 "Not enough memory for DMA chunks"); 1874 return (-1); 1875 } 1876 1877 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle, 1878 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp, 1879 &pchunk->dma_addr) == B_FALSE) { 1880 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1881 "Failed to setup DMA for chunks"); 1882 kmem_free(pchunk, sizeof (pmcs_chunk_t)); 1883 return (-1); 1884 } 1885 1886 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) || 1887 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) { 1888 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED); 1889 return (-1); 1890 } 1891 1892 bzero(pchunk->addrp, nsize); 1893 dc = NULL; 1894 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) { 1895 pmcs_dmachunk_t *tmp; 1896 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP); 1897 tmp->nxt = dc; 1898 dc = tmp; 1899 } 1900 mutex_enter(&pwp->dma_lock); 1901 pmcs_idma_chunks(pwp, dc, pchunk, nsize); 1902 pwp->nchunks++; 1903 mutex_exit(&pwp->dma_lock); 1904 return (0); 1905 } 1906 1907 static void 1908 pmcs_check_forward_progress(pmcs_hw_t *pwp) 1909 { 1910 pmcwork_t *wrkp; 1911 uint32_t *iqp; 1912 uint32_t cur_iqci; 1913 uint32_t cur_work_idx; 1914 uint32_t cur_msgu_tick; 1915 uint32_t cur_iop_tick; 1916 int i; 1917 1918 mutex_enter(&pwp->lock); 1919 1920 if (pwp->state == STATE_IN_RESET) { 1921 mutex_exit(&pwp->lock); 1922 return; 1923 } 1924 1925 /* 1926 * Ensure that inbound work is getting picked up. First, check to 1927 * see if new work has been posted. If it has, ensure that the 1928 * work is moving forward by checking the consumer index and the 1929 * last_htag for the work being processed against what we saw last 1930 * time. 
Note: we use the work structure's 'last_htag' because at 1931 * any given moment it could be freed back, thus clearing 'htag' 1932 * and setting 'last_htag' (see pmcs_pwork). 1933 */ 1934 for (i = 0; i < PMCS_NIQ; i++) { 1935 cur_iqci = pmcs_rd_iqci(pwp, i); 1936 iqp = &pwp->iqp[i][cur_iqci * (PMCS_QENTRY_SIZE >> 2)]; 1937 cur_work_idx = PMCS_TAG_INDEX(LE_32(*(iqp+1))); 1938 wrkp = &pwp->work[cur_work_idx]; 1939 if (cur_iqci == pwp->shadow_iqpi[i]) { 1940 pwp->last_iqci[i] = cur_iqci; 1941 pwp->last_htag[i] = wrkp->last_htag; 1942 continue; 1943 } 1944 if ((cur_iqci == pwp->last_iqci[i]) && 1945 (wrkp->last_htag == pwp->last_htag[i])) { 1946 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1947 "Inbound Queue stall detected, issuing reset"); 1948 goto hot_reset; 1949 } 1950 pwp->last_iqci[i] = cur_iqci; 1951 pwp->last_htag[i] = wrkp->last_htag; 1952 } 1953 1954 /* 1955 * Check heartbeat on both the MSGU and IOP. It is unlikely that 1956 * we'd ever fail here, as the inbound queue monitoring code above 1957 * would detect a stall due to either of these elements being 1958 * stalled, but we might as well keep an eye on them. 1959 */ 1960 cur_msgu_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK); 1961 if (cur_msgu_tick == pwp->last_msgu_tick) { 1962 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1963 "Stall detected on MSGU, issuing reset"); 1964 goto hot_reset; 1965 } 1966 pwp->last_msgu_tick = cur_msgu_tick; 1967 1968 cur_iop_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK); 1969 if (cur_iop_tick == pwp->last_iop_tick) { 1970 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1971 "Stall detected on IOP, issuing reset"); 1972 goto hot_reset; 1973 } 1974 pwp->last_iop_tick = cur_iop_tick; 1975 1976 mutex_exit(&pwp->lock); 1977 return; 1978 1979 hot_reset: 1980 pwp->state = STATE_DEAD; 1981 /* 1982 * We've detected a stall. Attempt to recover service via hot 1983 * reset. In case of failure, pmcs_hot_reset() will handle the 1984 * failure and issue any required FM notifications. 
1985 * See pmcs_subr.c for more details. 1986 */ 1987 if (pmcs_hot_reset(pwp)) { 1988 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 1989 "%s: hot reset failure", __func__); 1990 } else { 1991 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 1992 "%s: hot reset complete", __func__); 1993 pwp->last_reset_reason = PMCS_LAST_RST_STALL; 1994 } 1995 mutex_exit(&pwp->lock); 1996 } 1997 1998 static void 1999 pmcs_check_commands(pmcs_hw_t *pwp) 2000 { 2001 pmcs_cmd_t *sp; 2002 size_t amt; 2003 char path[32]; 2004 pmcwork_t *pwrk; 2005 pmcs_xscsi_t *target; 2006 pmcs_phy_t *phyp; 2007 int rval; 2008 2009 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) { 2010 mutex_enter(&pwrk->lock); 2011 2012 /* 2013 * If the command isn't active, we can't be timing it still. 2014 * Active means the tag is not free and the state is "on chip". 2015 */ 2016 if (!PMCS_COMMAND_ACTIVE(pwrk)) { 2017 mutex_exit(&pwrk->lock); 2018 continue; 2019 } 2020 2021 /* 2022 * No timer active for this command. 2023 */ 2024 if (pwrk->timer == 0) { 2025 mutex_exit(&pwrk->lock); 2026 continue; 2027 } 2028 2029 /* 2030 * Knock off bits for the time interval. 2031 */ 2032 if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) { 2033 pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL); 2034 } else { 2035 pwrk->timer = 0; 2036 } 2037 if (pwrk->timer > 0) { 2038 mutex_exit(&pwrk->lock); 2039 continue; 2040 } 2041 2042 /* 2043 * The command has now officially timed out. 2044 * Get the path for it. If it doesn't have 2045 * a phy pointer any more, it's really dead 2046 * and can just be put back on the free list. 2047 * There should *not* be any commands associated 2048 * with it any more. 
2049 */ 2050 if (pwrk->phy == NULL) { 2051 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2052 "dead command with gone phy being recycled"); 2053 ASSERT(pwrk->xp == NULL); 2054 pmcs_pwork(pwp, pwrk); 2055 continue; 2056 } 2057 amt = sizeof (path); 2058 amt = min(sizeof (pwrk->phy->path), amt); 2059 (void) memcpy(path, pwrk->phy->path, amt); 2060 2061 /* 2062 * If this is a non-SCSA command, stop here. Eventually 2063 * we might do something with non-SCSA commands here- 2064 * but so far their timeout mechanisms are handled in 2065 * the WAIT_FOR macro. 2066 */ 2067 if (pwrk->xp == NULL) { 2068 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2069 "%s: non-SCSA cmd tag 0x%x timed out", 2070 path, pwrk->htag); 2071 mutex_exit(&pwrk->lock); 2072 continue; 2073 } 2074 2075 sp = pwrk->arg; 2076 ASSERT(sp != NULL); 2077 2078 /* 2079 * Mark it as timed out. 2080 */ 2081 CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT; 2082 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 2083 #ifdef DEBUG 2084 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 2085 "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d", 2086 path, pwrk->htag, pwrk->state, pwrk->onwire); 2087 #else 2088 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 2089 "%s: SCSA cmd tag 0x%x timed out (state %x)", 2090 path, pwrk->htag, pwrk->state); 2091 #endif 2092 /* 2093 * Mark the work structure as timed out. 2094 */ 2095 pwrk->state = PMCS_WORK_STATE_TIMED_OUT; 2096 phyp = pwrk->phy; 2097 target = pwrk->xp; 2098 mutex_exit(&pwrk->lock); 2099 2100 pmcs_lock_phy(phyp); 2101 mutex_enter(&target->statlock); 2102 2103 /* 2104 * No point attempting recovery if the device is gone 2105 */ 2106 if (target->dev_gone) { 2107 mutex_exit(&target->statlock); 2108 pmcs_unlock_phy(phyp); 2109 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2110 "%s: tgt(0x%p) is gone. 
Returning CMD_DEV_GONE " 2111 "for htag 0x%08x", __func__, 2112 (void *)target, pwrk->htag); 2113 mutex_enter(&pwrk->lock); 2114 if (!PMCS_COMMAND_DONE(pwrk)) { 2115 /* Complete this command here */ 2116 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2117 "%s: Completing cmd (htag 0x%08x) " 2118 "anyway", __func__, pwrk->htag); 2119 pwrk->dead = 1; 2120 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 2121 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 2122 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 2123 } else { 2124 mutex_exit(&pwrk->lock); 2125 } 2126 continue; 2127 } 2128 2129 mutex_exit(&target->statlock); 2130 rval = pmcs_abort(pwp, phyp, pwrk->htag, 0, 1); 2131 if (rval) { 2132 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2133 "%s: Bad status (%d) on abort of HTAG 0x%08x", 2134 __func__, rval, pwrk->htag); 2135 pmcs_unlock_phy(phyp); 2136 mutex_enter(&pwrk->lock); 2137 if (!PMCS_COMMAND_DONE(pwrk)) { 2138 /* Complete this command here */ 2139 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2140 "%s: Completing cmd (htag 0x%08x) " 2141 "anyway", __func__, pwrk->htag); 2142 if (target->dev_gone) { 2143 pwrk->dead = 1; 2144 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 2145 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 2146 } 2147 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 2148 } else { 2149 mutex_exit(&pwrk->lock); 2150 } 2151 pmcs_lock_phy(phyp); 2152 /* 2153 * No need to reschedule ABORT if we get any other 2154 * status 2155 */ 2156 if (rval == ENOMEM) { 2157 phyp->abort_sent = 0; 2158 phyp->abort_pending = 1; 2159 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 2160 } 2161 } 2162 pmcs_unlock_phy(phyp); 2163 } 2164 /* 2165 * Run any completions that may have been queued up. 
 */
	PMCS_CQ_RUN(pwp);
}

/*
 * Watchdog timer callback, rearmed every PMCS_WATCH_INTERVAL ticks while
 * the HBA is in STATE_RUNNING.  It checks chip forward progress, restarts
 * discovery when the re-enumeration timer has expired, and dispatches the
 * worker taskq if any work flags are pending.
 */
static void
pmcs_watchdog(void *arg)
{
	pmcs_hw_t *pwp = arg;

	DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	/*
	 * Check forward progress on the chip
	 */
	if (++pwp->watchdog_count == PMCS_FWD_PROG_TRIGGER) {
		pwp->watchdog_count = 0;
		pmcs_check_forward_progress(pwp);
	}

	/*
	 * Check to see if we need to kick discovery off again
	 */
	mutex_enter(&pwp->config_lock);
	if (pwp->config_restart &&
	    (ddi_get_lbolt() >= pwp->config_restart_time)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Timer expired for re-enumeration: Start discovery",
		    __func__);
		pwp->config_restart = B_FALSE;
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}
	mutex_exit(&pwp->config_lock);

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		/* HBA is not running: do not rearm the timer. */
		mutex_exit(&pwp->lock);
		return;
	}

	/* atomic_cas_ulong(.., 0, 0) is an atomic read of work_flags */
	if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
		if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
		    DDI_NOSLEEP) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Could not dispatch to worker thread");
		}
	}
	/* Rearm ourselves for the next interval. */
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));

	mutex_exit(&pwp->lock);

	pmcs_check_commands(pwp);
	pmcs_handle_dead_phys(pwp);
}

/*
 * Remove the first "icnt" interrupt handlers that were added to the
 * interrupt handle table.  Returns 0 on success, -1 if any removal fails
 * (remaining handlers are left in place).
 */
static int
pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt)
{
	int i, r, rslt = 0;
	for (i = 0; i < icnt; i++) {
		r = ddi_intr_remove_handler(pwp->ih_table[i]);
		if (r == DDI_SUCCESS) {
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: unable to remove interrupt handler %d", __func__, i);
		rslt = -1;
		break;
	}
	return (rslt);
}

/*
 * Disable interrupts: either all vectors in one shot (when the interrupt
 * capability includes DDI_INTR_FLAG_BLOCK) or the first "icnt" vectors
 * one at a time.  Returns 0 on success, -1 on any failure.
 */
static int
pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt)
{
	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/*
		 * Block-capable: note this disables intr_cnt vectors
		 * regardless of icnt.
		 */
		int r = ddi_intr_block_disable(&pwp->ih_table[0],
		    pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to disable interrupt block");
			return (-1);
		}
	} else {
		int i;
		for (i = 0; i < icnt; i++) {
			if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to disable interrupt %d", i);
			return (-1);
		}
	}
	return (0);
}

/*
 * Free the first "icnt" allocated interrupt vectors.  On full success the
 * handle table itself is also released.  Returns 0 on success, -1 if any
 * ddi_intr_free() fails (the table is then kept).
 */
static int
pmcs_free_intrs(pmcs_hw_t *pwp, int icnt)
{
	int i;
	for (i = 0; i < icnt; i++) {
		if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) {
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to free interrupt %d", i);
		return (-1);
	}
	kmem_free(pwp->ih_table, pwp->ih_table_size);
	pwp->ih_table_size = 0;
	return (0);
}

/*
 * Try to set up interrupts of type "type" with a minimum number of interrupts
 * of "min".  On success, pwp->int_type and pwp->intr_cnt are set; on any
 * failure all partial state is rolled back and int_type is PMCS_INT_NONE.
 */
static void
pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min)
{
	int rval, avail, count, actual, max;

	rval = ddi_intr_get_nintrs(pwp->dip, type, &count);
	if ((rval != DDI_SUCCESS) || (count < min)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d",
		    __func__, type, rval, count, min);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
	    "%s: nintrs = %d for type: %d", __func__, count, type);

	rval = ddi_intr_get_navail(pwp->dip, type, &avail);
	if ((rval != DDI_SUCCESS) || (avail < min)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: get_navail failed; type: %d rc: %d avail: %d min: %d",
		    __func__, type, rval, avail, min);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
	    "%s: navail = %d for type: %d", __func__, avail, type);

	/* Size the handle table for everything available. */
	pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t);
	pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP);

	switch (type) {
	case DDI_INTR_TYPE_MSIX:
		pwp->int_type = PMCS_INT_MSIX;
		max = PMCS_MAX_MSIX;
		break;
	case DDI_INTR_TYPE_MSI:
		pwp->int_type = PMCS_INT_MSI;
		max = PMCS_MAX_MSI;
		break;
	case DDI_INTR_TYPE_FIXED:
	default:
		pwp->int_type = PMCS_INT_FIXED;
		max = PMCS_MAX_FIXED;
		break;
	}

	rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (rval != DDI_SUCCESS) {
		/*
		 * Allocation failed: undo all state so the caller can try
		 * the next (less capable) interrupt type.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: ddi_intr_alloc failed; type: %d rc: %d",
		    __func__, type, rval);
		kmem_free(pwp->ih_table, pwp->ih_table_size);
		pwp->ih_table = NULL;
		pwp->ih_table_size = 0;
		pwp->intr_cnt = 0;
		pwp->int_type = PMCS_INT_NONE;
		return;
	}

	pwp->intr_cnt = actual;
}

/*
 * Set up interrupts.
 * We return one of three values:
 *
 * 0 - success
 * EAGAIN - failure to set up interrupts
 * EIO - "" + we're now stuck partly enabled
 *
 * If EIO is returned, we can't unload the driver.
 */
static int
pmcs_setup_intr(pmcs_hw_t *pwp)
{
	int i, r, itypes, oqv_count;
	ddi_intr_handler_t **iv_table;
	size_t iv_table_size;
	uint_t pri;

	if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get interrupt types");
		return (EAGAIN);
	}

	/* Honor the compile-time MSI-X/MSI disable knobs. */
	if (disable_msix) {
		itypes &= ~DDI_INTR_TYPE_MSIX;
	}
	if (disable_msi) {
		itypes &= ~DDI_INTR_TYPE_MSI;
	}

	/*
	 * We won't know what firmware we're running until we call pmcs_setup,
	 * and we can't call pmcs_setup until we establish interrupts.
	 */

	pwp->int_type = PMCS_INT_NONE;

	/*
	 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be
	 * uncivilized.
	 */
	if (itypes & DDI_INTR_TYPE_MSIX) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
		if (pwp->int_type == PMCS_INT_MSIX) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_MSI) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
		if (pwp->int_type == PMCS_INT_MSI) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_FIXED) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
		if (pwp->int_type == PMCS_INT_FIXED) {
			itypes = 0;
		}
	}

	if (pwp->intr_cnt == 0) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "No interrupts available");
		return (EAGAIN);
	}

	iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
	iv_table = kmem_alloc(iv_table_size, KM_SLEEP);

	/*
	 * Get iblock cookie and add handlers.
	 * The vector-to-handler mapping depends on how many vectors we
	 * actually received: 1 (shared), 2 (iodone/non-io) or 4 (per-queue).
	 */
	switch (pwp->intr_cnt) {
	case 1:
		iv_table[0] = pmcs_all_intr;
		break;
	case 2:
		iv_table[0] = pmcs_iodone_ix;
		iv_table[1] = pmcs_nonio_ix;
		break;
	case 4:
		iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
		iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
		iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
		iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
		kmem_free(iv_table, iv_table_size);
		return (EAGAIN);
	}

	for (i = 0; i < pwp->intr_cnt; i++) {
		r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
		    (caddr_t)pwp, NULL);
		if (r != DDI_SUCCESS) {
			/*
			 * Roll back the i handlers added so far; if rollback
			 * itself fails we are stuck partly enabled (EIO).
			 */
			kmem_free(iv_table, iv_table_size);
			if (pmcs_remove_ihandlers(pwp, i)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, i)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	kmem_free(iv_table, iv_table_size);

	if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get int capabilities");
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "intr blk enable failed");
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EFAULT);
		}
	} else {
		for (i = 0; i < pwp->intr_cnt; i++) {
			r = ddi_intr_enable(pwp->ih_table[i]);
			if (r == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to enable interrupt %d", i);
			/* Disable the i vectors already enabled, then undo. */
			if (pmcs_disable_intrs(pwp, i)) {
				return (EIO);
			}
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	/*
	 * Set up locks.
	 * All driver mutexes are initialized at the interrupt priority so
	 * they may be taken from interrupt context.
	 */
	if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get interrupt priority");
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	pwp->locks_initted = 1;
	pwp->intr_pri = pri;
	mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));
#ifdef DEBUG
	mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
#endif
	cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->config_cv, NULL, CV_DRIVER, NULL);
	for (i = 0; i < PMCS_NIQ; i++) {
		mutex_init(&pwp->iqp_lock[i], NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
	}
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
		    CV_DRIVER, NULL);
	}

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured",
	    pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
	    ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
	    pwp->intr_cnt == 1? "t" : "ts");


	/*
	 * Enable Interrupts
	 * Build the outbound doorbell mask: clear (unmask) one bit per
	 * outbound queue vector, starting from all-ones.  "pri" is reused
	 * here as a scratch mask accumulator.
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}

/*
 * Tear down all interrupt state: disable, remove handlers, free vectors.
 * Returns 0 on success or EIO if any step fails (driver cannot unload).
 */
static int
pmcs_teardown_intr(pmcs_hw_t *pwp)
{
	if (pwp->intr_cnt) {
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
	}
	return (0);
}

/* MSI-X vector: general outbound queue service. */
static uint_t
pmcs_general_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_general_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/* MSI-X vector: event outbound queue service. */
static uint_t
pmcs_event_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_event_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/* MSI-X vector: I/O completion outbound queue service. */
static uint_t
pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);

	/*
	 * It's possible that if we just turned interrupt coalescing off
	 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
	 * queue) that there was an interrupt already pending. We use
	 * io_intr_coal.int_cleared to ensure that we still drop in here and
	 * clear the appropriate interrupt bit one last time.
	 */
	mutex_enter(&pwp->ict_lock);
	if (pwp->io_intr_coal.timer_on ||
	    (pwp->io_intr_coal.int_cleared == B_FALSE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		pwp->io_intr_coal.int_cleared = B_TRUE;
	}
	mutex_exit(&pwp->ict_lock);

	pmcs_iodone_intr(pwp);

	return (DDI_INTR_CLAIMED);
}

/* MSI-X vector: fatal interrupt service. */
static uint_t
pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_fatal_handler(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Two-vector mode: services everything that is not I/O completion
 * (fatal conditions, general and event queues).
 */
static uint_t
pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *)arg1;
	uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		pmcs_general_intr(pwp);
		pmcs_event_intr(pwp);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Single-vector (shared/INT-X) handler: reads the outbound doorbell and
 * dispatches every pending queue.  Returns UNCLAIMED only when nothing
 * was pending and we are not in MSI mode.
 */
static uint_t
pmcs_all_intr(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *) arg1;
	uint32_t obdb;
	int handled = 0;

	obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Check for Outbound Queue service needed
	 */
	if (obdb & (1 << PMCS_OQ_IODONE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		obdb ^= (1 << PMCS_OQ_IODONE);
		handled++;
		pmcs_iodone_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		obdb ^= (1 << PMCS_OQ_GENERAL);
		handled++;
		pmcs_general_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_EVENTS)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_EVENTS));
		obdb ^= (1 << PMCS_OQ_EVENTS);
		handled++;
		pmcs_event_intr(pwp);
	}
	if (obdb) {
		/* Unknown doorbell bits: log and clear so we don't spin. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "interrupt bits not handled (0x%x)", obdb);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
		handled++;
	}
	if (pwp->int_type == PMCS_INT_MSI) {
		/* MSI is edge-triggered; always claim. */
		handled++;
	}
	return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

/*
 * Fatal-interrupt recovery: mark the HBA dead and attempt a hot reset.
 */
void
pmcs_fatal_handler(pmcs_hw_t *pwp)
{
	pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught");

	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;

	/*
	 * Attempt a hot reset. In case of failure, pmcs_hot_reset() will
	 * handle the failure and issue any required FM notifications.
	 * See pmcs_subr.c for more details.
	 */
	if (pmcs_hot_reset(pwp)) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "%s: hot reset failure", __func__);
	} else {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "%s: hot reset complete", __func__);
		pwp->last_reset_reason = PMCS_LAST_RST_FATAL_ERROR;
	}
	mutex_exit(&pwp->lock);
}

/*
 * Called with PHY lock and target statlock held and scratch acquired.
2763 */ 2764 boolean_t 2765 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 2766 { 2767 pmcs_phy_t *pptr = tgt->phy; 2768 2769 switch (pptr->dtype) { 2770 case SAS: 2771 case EXPANDER: 2772 break; 2773 case SATA: 2774 tgt->ca = 1; 2775 break; 2776 default: 2777 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2778 "%s: Target %p has PHY %p with invalid dtype", 2779 __func__, (void *)tgt, (void *)pptr); 2780 return (B_FALSE); 2781 } 2782 2783 tgt->new = 1; 2784 tgt->dev_gone = 0; 2785 tgt->recover_wait = 0; 2786 2787 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2788 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__, 2789 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2790 2791 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) { 2792 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2793 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__, 2794 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2795 mutex_destroy(&tgt->statlock); 2796 mutex_destroy(&tgt->wqlock); 2797 mutex_destroy(&tgt->aqlock); 2798 return (B_FALSE); 2799 } 2800 2801 return (B_TRUE); 2802 } 2803 2804 /* 2805 * Called with softstate lock held 2806 */ 2807 void 2808 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2809 { 2810 pmcs_xscsi_t *xp; 2811 unsigned int vtgt; 2812 2813 ASSERT(mutex_owned(&pwp->lock)); 2814 2815 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) { 2816 xp = pwp->targets[vtgt]; 2817 if (xp == NULL) { 2818 continue; 2819 } 2820 2821 mutex_enter(&xp->statlock); 2822 if (xp->phy == pptr) { 2823 if (xp->new) { 2824 xp->new = 0; 2825 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2826 "cancel config of vtgt %u", vtgt); 2827 } else { 2828 pmcs_clear_xp(pwp, xp); 2829 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2830 "Removed tgt 0x%p vtgt %u", 2831 (void *)xp, vtgt); 2832 } 2833 mutex_exit(&xp->statlock); 2834 break; 2835 } 2836 mutex_exit(&xp->statlock); 2837 } 2838 } 2839 2840 void 2841 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, 2842 pmcs_phy_t 
*phyp, pmcs_xscsi_t *target, const char *fmt, ...) 2843 { 2844 va_list ap; 2845 int written = 0; 2846 char *ptr; 2847 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2848 boolean_t system_log; 2849 int system_log_level; 2850 2851 switch (level) { 2852 case PMCS_PRT_DEBUG_DEVEL: 2853 case PMCS_PRT_DEBUG_DEV_STATE: 2854 case PMCS_PRT_DEBUG_PHY_LOCKING: 2855 case PMCS_PRT_DEBUG_SCSI_STATUS: 2856 case PMCS_PRT_DEBUG_UNDERFLOW: 2857 case PMCS_PRT_DEBUG_CONFIG: 2858 case PMCS_PRT_DEBUG_IPORT: 2859 case PMCS_PRT_DEBUG_MAP: 2860 case PMCS_PRT_DEBUG3: 2861 case PMCS_PRT_DEBUG2: 2862 case PMCS_PRT_DEBUG1: 2863 case PMCS_PRT_DEBUG: 2864 system_log = B_FALSE; 2865 break; 2866 case PMCS_PRT_INFO: 2867 system_log = B_TRUE; 2868 system_log_level = CE_CONT; 2869 break; 2870 case PMCS_PRT_WARN: 2871 system_log = B_TRUE; 2872 system_log_level = CE_NOTE; 2873 break; 2874 case PMCS_PRT_ERR: 2875 system_log = B_TRUE; 2876 system_log_level = CE_WARN; 2877 break; 2878 default: 2879 return; 2880 } 2881 2882 mutex_enter(&pmcs_trace_lock); 2883 gethrestime(&pmcs_tbuf_ptr->timestamp); 2884 ptr = pmcs_tbuf_ptr->buf; 2885 2886 /* 2887 * Store the pertinent PHY and target information if there is any 2888 */ 2889 if (target == NULL) { 2890 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM; 2891 pmcs_tbuf_ptr->target_ua[0] = '\0'; 2892 } else { 2893 pmcs_tbuf_ptr->target_num = target->target_num; 2894 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua, 2895 PMCS_TBUF_UA_MAX_SIZE); 2896 } 2897 2898 if (phyp == NULL) { 2899 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8); 2900 pmcs_tbuf_ptr->phy_path[0] = '\0'; 2901 pmcs_tbuf_ptr->phy_dtype = NOTHING; 2902 } else { 2903 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address, 2904 phyp->sas_address, 8); 2905 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32); 2906 pmcs_tbuf_ptr->phy_dtype = phyp->dtype; 2907 } 2908 2909 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2910 ddi_get_instance(pwp->dip), level); 2911 ptr += strlen(ptr); 2912 
va_start(ap, fmt); 2913 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2914 va_end(ap); 2915 if (written > elem_size - 1) { 2916 /* Indicate truncation */ 2917 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2918 } 2919 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2920 pmcs_tbuf_ptr = pmcs_tbuf; 2921 pmcs_tbuf_wrap = B_TRUE; 2922 pmcs_tbuf_idx = 0; 2923 } else { 2924 ++pmcs_tbuf_ptr; 2925 } 2926 mutex_exit(&pmcs_trace_lock); 2927 2928 /* 2929 * When pmcs_force_syslog in non-zero, everything goes also 2930 * to syslog, at CE_CONT level. 2931 */ 2932 if (pmcs_force_syslog) { 2933 system_log = B_TRUE; 2934 system_log_level = CE_CONT; 2935 } 2936 2937 /* 2938 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2939 * goes to syslog. 2940 */ 2941 if (system_log) { 2942 char local[196]; 2943 2944 switch (system_log_level) { 2945 case CE_CONT: 2946 (void) snprintf(local, sizeof (local), "%sINFO: ", 2947 pmcs_console ? "" : "?"); 2948 break; 2949 case CE_NOTE: 2950 case CE_WARN: 2951 local[0] = 0; 2952 break; 2953 default: 2954 return; 2955 } 2956 2957 ptr = local; 2958 ptr += strlen(local); 2959 (void) snprintf(ptr, (sizeof (local)) - 2960 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2961 ddi_get_instance(pwp->dip)); 2962 ptr += strlen(ptr); 2963 va_start(ap, fmt); 2964 (void) vsnprintf(ptr, 2965 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2966 va_end(ap); 2967 if (level == CE_CONT) { 2968 (void) strlcat(local, "\n", sizeof (local)); 2969 } 2970 cmn_err(system_log_level, local); 2971 } 2972 2973 } 2974 2975 /* 2976 * pmcs_acquire_scratch 2977 * 2978 * If "wait" is true, the caller will wait until it can acquire the scratch. 2979 * This implies the caller needs to be in a context where spinning for an 2980 * indeterminate amount of time is acceptable. 
2981 */ 2982 int 2983 pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait) 2984 { 2985 int rval; 2986 2987 if (!wait) { 2988 return (atomic_swap_8(&pwp->scratch_locked, 1)); 2989 } 2990 2991 /* 2992 * Caller will wait for scratch. 2993 */ 2994 while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) { 2995 drv_usecwait(100); 2996 } 2997 2998 return (rval); 2999 } 3000 3001 void 3002 pmcs_release_scratch(pmcs_hw_t *pwp) 3003 { 3004 pwp->scratch_locked = 0; 3005 } 3006 3007 /* Called with iport_lock and phy lock held */ 3008 void 3009 pmcs_create_one_phy_stats(pmcs_iport_t *iport, pmcs_phy_t *phyp) 3010 { 3011 sas_phy_stats_t *ps; 3012 pmcs_hw_t *pwp; 3013 int ndata; 3014 char ks_name[KSTAT_STRLEN]; 3015 3016 ASSERT(mutex_owned(&iport->lock)); 3017 pwp = iport->pwp; 3018 ASSERT(pwp != NULL); 3019 ASSERT(mutex_owned(&phyp->phy_lock)); 3020 3021 if (phyp->phy_stats != NULL) { 3022 /* 3023 * Delete existing kstats with name containing 3024 * old iport instance# and allow creation of 3025 * new kstats with new iport instance# in the name. 
3026 */ 3027 kstat_delete(phyp->phy_stats); 3028 } 3029 3030 ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t)); 3031 3032 (void) snprintf(ks_name, sizeof (ks_name), 3033 "%s.%llx.%d.%d", ddi_driver_name(iport->dip), 3034 (longlong_t)pwp->sas_wwns[0], 3035 ddi_get_instance(iport->dip), phyp->phynum); 3036 3037 phyp->phy_stats = kstat_create("pmcs", 3038 ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS, 3039 KSTAT_TYPE_NAMED, ndata, 0); 3040 3041 if (phyp->phy_stats == NULL) { 3042 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL, 3043 "%s: Failed to create %s kstats for PHY(0x%p) at %s", 3044 __func__, ks_name, (void *)phyp, phyp->path); 3045 } 3046 3047 ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data; 3048 3049 kstat_named_init(&ps->seconds_since_last_reset, 3050 "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG); 3051 kstat_named_init(&ps->tx_frames, 3052 "TxFrames", KSTAT_DATA_ULONGLONG); 3053 kstat_named_init(&ps->rx_frames, 3054 "RxFrames", KSTAT_DATA_ULONGLONG); 3055 kstat_named_init(&ps->tx_words, 3056 "TxWords", KSTAT_DATA_ULONGLONG); 3057 kstat_named_init(&ps->rx_words, 3058 "RxWords", KSTAT_DATA_ULONGLONG); 3059 kstat_named_init(&ps->invalid_dword_count, 3060 "InvalidDwordCount", KSTAT_DATA_ULONGLONG); 3061 kstat_named_init(&ps->running_disparity_error_count, 3062 "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG); 3063 kstat_named_init(&ps->loss_of_dword_sync_count, 3064 "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG); 3065 kstat_named_init(&ps->phy_reset_problem_count, 3066 "PhyResetProblemCount", KSTAT_DATA_ULONGLONG); 3067 3068 phyp->phy_stats->ks_private = phyp; 3069 phyp->phy_stats->ks_update = pmcs_update_phy_stats; 3070 kstat_install(phyp->phy_stats); 3071 } 3072 3073 static void 3074 pmcs_create_all_phy_stats(pmcs_iport_t *iport) 3075 { 3076 pmcs_hw_t *pwp; 3077 pmcs_phy_t *phyp; 3078 3079 ASSERT(iport != NULL); 3080 pwp = iport->pwp; 3081 ASSERT(pwp != NULL); 3082 3083 mutex_enter(&iport->lock); 3084 3085 for (phyp = 
list_head(&iport->phys); 3086 phyp != NULL; 3087 phyp = list_next(&iport->phys, phyp)) { 3088 3089 mutex_enter(&phyp->phy_lock); 3090 pmcs_create_one_phy_stats(iport, phyp); 3091 mutex_exit(&phyp->phy_lock); 3092 } 3093 3094 mutex_exit(&iport->lock); 3095 } 3096 3097 int 3098 pmcs_update_phy_stats(kstat_t *ks, int rw) 3099 { 3100 int val, ret = DDI_FAILURE; 3101 pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private; 3102 pmcs_hw_t *pwp = pptr->pwp; 3103 sas_phy_stats_t *ps = ks->ks_data; 3104 3105 _NOTE(ARGUNUSED(rw)); 3106 ASSERT((pptr != NULL) && (pwp != NULL)); 3107 3108 /* 3109 * We just want to lock against other invocations of kstat; 3110 * we don't need to pmcs_lock_phy() for this. 3111 */ 3112 mutex_enter(&pptr->phy_lock); 3113 3114 /* Get Stats from Chip */ 3115 val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum); 3116 if (val == DDI_FAILURE) 3117 goto fail; 3118 ps->invalid_dword_count.value.ull = (unsigned long long)val; 3119 3120 val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum); 3121 if (val == DDI_FAILURE) 3122 goto fail; 3123 ps->running_disparity_error_count.value.ull = (unsigned long long)val; 3124 3125 val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum); 3126 if (val == DDI_FAILURE) 3127 goto fail; 3128 ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val; 3129 3130 val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum); 3131 if (val == DDI_FAILURE) 3132 goto fail; 3133 ps->phy_reset_problem_count.value.ull = (unsigned long long)val; 3134 3135 ret = DDI_SUCCESS; 3136 fail: 3137 mutex_exit(&pptr->phy_lock); 3138 return (ret); 3139 } 3140 3141 static void 3142 pmcs_destroy_phy_stats(pmcs_iport_t *iport) 3143 { 3144 pmcs_phy_t *phyp; 3145 3146 ASSERT(iport != NULL); 3147 mutex_enter(&iport->lock); 3148 phyp = iport->pptr; 3149 if (phyp == NULL) { 3150 mutex_exit(&iport->lock); 3151 return; 3152 } 3153 3154 for (phyp = list_head(&iport->phys); 3155 phyp != NULL; 3156 phyp = 
list_next(&iport->phys, phyp)) { 3157 3158 mutex_enter(&phyp->phy_lock); 3159 if (phyp->phy_stats != NULL) { 3160 kstat_delete(phyp->phy_stats); 3161 phyp->phy_stats = NULL; 3162 } 3163 mutex_exit(&phyp->phy_lock); 3164 } 3165 3166 mutex_exit(&iport->lock); 3167 } 3168 3169 /*ARGSUSED*/ 3170 static int 3171 pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3172 { 3173 /* 3174 * as the driver can always deal with an error in any dma or 3175 * access handle, we can just return the fme_status value. 3176 */ 3177 pci_ereport_post(dip, err, NULL); 3178 return (err->fme_status); 3179 } 3180 3181 static void 3182 pmcs_fm_init(pmcs_hw_t *pwp) 3183 { 3184 ddi_iblock_cookie_t fm_ibc; 3185 3186 /* Only register with IO Fault Services if we have some capability */ 3187 if (pwp->fm_capabilities) { 3188 /* Adjust access and dma attributes for FMA */ 3189 pwp->reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 3190 pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3191 pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3192 pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3193 pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3194 3195 /* 3196 * Register capabilities with IO Fault Services. 3197 */ 3198 ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc); 3199 3200 /* 3201 * Initialize pci ereport capabilities if ereport 3202 * capable (should always be.) 3203 */ 3204 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) || 3205 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3206 pci_ereport_setup(pwp->dip); 3207 } 3208 3209 /* 3210 * Register error callback if error callback capable. 
3211 */ 3212 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3213 ddi_fm_handler_register(pwp->dip, 3214 pmcs_fm_error_cb, (void *) pwp); 3215 } 3216 } 3217 } 3218 3219 static void 3220 pmcs_fm_fini(pmcs_hw_t *pwp) 3221 { 3222 /* Only unregister FMA capabilities if registered */ 3223 if (pwp->fm_capabilities) { 3224 /* 3225 * Un-register error callback if error callback capable. 3226 */ 3227 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3228 ddi_fm_handler_unregister(pwp->dip); 3229 } 3230 3231 /* 3232 * Release any resources allocated by pci_ereport_setup() 3233 */ 3234 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) || 3235 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3236 pci_ereport_teardown(pwp->dip); 3237 } 3238 3239 /* Unregister from IO Fault Services */ 3240 ddi_fm_fini(pwp->dip); 3241 3242 /* Adjust access and dma attributes for FMA */ 3243 pwp->reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3244 pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3245 pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3246 pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3247 pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3248 } 3249 } 3250 3251 static boolean_t 3252 pmcs_fabricate_wwid(pmcs_hw_t *pwp) 3253 { 3254 char *cp, c; 3255 uint64_t adr; 3256 int i; 3257 3258 cp = &c; 3259 (void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr); 3260 3261 if (adr == 0) { 3262 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 3263 "%s: No serial number available to fabricate WWN", 3264 __func__); 3265 3266 adr = (uint64_t)gethrtime(); 3267 } 3268 3269 adr <<= 8; 3270 adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52); 3271 adr |= (5ULL << 60); 3272 3273 for (i = 0; i < PMCS_MAX_PORTS; i++) { 3274 pwp->sas_wwns[i] = adr + i; 3275 } 3276 3277 return (B_TRUE); 3278 } 3279