1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * This file contains various support routines. 
27 */ 28 29 #include <sys/scsi/adapters/pmcs/pmcs.h> 30 31 /* 32 * Local static data 33 */ 34 static int tgtmap_stable_usec = MICROSEC; /* 1 second */ 35 static int tgtmap_csync_usec = 10 * MICROSEC; /* 10 seconds */ 36 37 /* 38 * SAS Topology Configuration 39 */ 40 static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *); 41 static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *); 42 43 static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *); 44 static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *); 45 static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int); 46 47 static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *); 48 static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *, 49 pmcs_phy_t *); 50 51 static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *); 52 static void pmcs_flush_nonio_cmds(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt); 53 static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t); 54 static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *); 55 static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *); 56 static void pmcs_begin_observations(pmcs_hw_t *); 57 static void pmcs_flush_observations(pmcs_hw_t *); 58 static boolean_t pmcs_report_observations(pmcs_hw_t *); 59 static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *, 60 pmcs_phy_t *); 61 static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *); 62 static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *); 63 static void pmcs_lock_phy_impl(pmcs_phy_t *, int); 64 static void pmcs_unlock_phy_impl(pmcs_phy_t *, int); 65 static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *); 66 static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *); 67 static void pmcs_reap_dead_phy(pmcs_phy_t *); 68 static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *); 69 static boolean_t pmcs_phy_target_match(pmcs_phy_t *); 70 static void pmcs_iport_active(pmcs_iport_t *); 71 static void 
pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t, 72 void **); 73 static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *, 74 scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t); 75 static void pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *); 76 static void pmcs_get_fw_version(pmcs_hw_t *); 77 static int pmcs_get_time_stamp(pmcs_hw_t *, uint64_t *, hrtime_t *); 78 79 /* 80 * Often used strings 81 */ 82 const char pmcs_nowrk[] = "%s: unable to get work structure"; 83 const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry"; 84 const char pmcs_timeo[] = "%s: command timed out"; 85 86 extern const ddi_dma_attr_t pmcs_dattr; 87 extern kmutex_t pmcs_trace_lock; 88 89 /* 90 * Some Initial setup steps. 91 */ 92 93 int 94 pmcs_setup(pmcs_hw_t *pwp) 95 { 96 uint32_t barval = pwp->mpibar; 97 uint32_t i, scratch, regbar, regoff, barbar, baroff; 98 uint32_t new_ioq_depth, ferr = 0; 99 100 /* 101 * Check current state. If we're not at READY state, 102 * we can't go further. 103 */ 104 scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 105 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) { 106 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 107 "%s: AAP Error State (0x%x)", 108 __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 109 PMCS_MSGU_AAP_ERROR_MASK); 110 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 111 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 112 return (-1); 113 } 114 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 115 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 116 "%s: AAP unit not ready (state 0x%x)", 117 __func__, scratch & PMCS_MSGU_AAP_STATE_MASK); 118 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 119 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 120 return (-1); 121 } 122 123 /* 124 * Read the offset from the Message Unit scratchpad 0 register. 125 * This allows us to read the MPI Configuration table. 126 * 127 * Check its signature for validity. 
128 */ 129 baroff = barval; 130 barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT; 131 baroff &= PMCS_MSGU_MPI_OFFSET_MASK; 132 133 regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0); 134 regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT; 135 regoff &= PMCS_MSGU_MPI_OFFSET_MASK; 136 137 if (regoff > baroff) { 138 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 139 "%s: bad MPI Table Length (register offset=0x%08x, " 140 "passed offset=0x%08x)", __func__, regoff, baroff); 141 return (-1); 142 } 143 if (regbar != barbar) { 144 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 145 "%s: bad MPI BAR (register BAROFF=0x%08x, " 146 "passed BAROFF=0x%08x)", __func__, regbar, barbar); 147 return (-1); 148 } 149 pwp->mpi_offset = regoff; 150 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) { 151 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 152 "%s: Bad MPI Configuration Table Signature 0x%x", __func__, 153 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS)); 154 return (-1); 155 } 156 157 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) { 158 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 159 "%s: Bad MPI Configuration Revision 0x%x", __func__, 160 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR)); 161 return (-1); 162 } 163 164 /* 165 * Generate offsets for the General System, Inbound Queue Configuration 166 * and Outbound Queue configuration tables. This way the macros to 167 * access those tables will work correctly. 
168 */ 169 pwp->mpi_gst_offset = 170 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO); 171 pwp->mpi_iqc_offset = 172 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO); 173 pwp->mpi_oqc_offset = 174 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO); 175 176 pmcs_get_fw_version(pwp); 177 178 pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO); 179 pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16; 180 181 pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 182 pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 183 pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 184 if (pwp->max_iq <= PMCS_NIQ) { 185 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 186 "%s: not enough Inbound Queues supported " 187 "(need %d, max_oq=%d)", __func__, pwp->max_iq, PMCS_NIQ); 188 return (-1); 189 } 190 if (pwp->max_oq <= PMCS_NOQ) { 191 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 192 "%s: not enough Outbound Queues supported " 193 "(need %d, max_oq=%d)", __func__, pwp->max_oq, PMCS_NOQ); 194 return (-1); 195 } 196 if (pwp->nphy == 0) { 197 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 198 "%s: zero phys reported", __func__); 199 return (-1); 200 } 201 if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) { 202 pwp->hipri_queue = (1 << PMCS_IQ_OTHER); 203 } 204 205 206 for (i = 0; i < pwp->nphy; i++) { 207 PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i); 208 PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i); 209 } 210 211 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2, 212 (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) | 213 (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT)); 214 215 /* 216 * Verify that ioq_depth is valid (> 0 and not so high that it 217 * would cause us to overrun the chip with commands). 218 */ 219 if (pwp->ioq_depth == 0) { 220 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 221 "%s: I/O queue depth set to 0. 
Setting to %d", 222 __func__, PMCS_NQENTRY); 223 pwp->ioq_depth = PMCS_NQENTRY; 224 } 225 226 if (pwp->ioq_depth < PMCS_MIN_NQENTRY) { 227 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 228 "%s: I/O queue depth set too low (%d). Setting to %d", 229 __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY); 230 pwp->ioq_depth = PMCS_MIN_NQENTRY; 231 } 232 233 if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) { 234 new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1); 235 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 236 "%s: I/O queue depth set too high (%d). Setting to %d", 237 __func__, pwp->ioq_depth, new_ioq_depth); 238 pwp->ioq_depth = new_ioq_depth; 239 } 240 241 /* 242 * Allocate consistent memory for OQs and IQs. 243 */ 244 pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr; 245 pwp->iqp_dma_attr.dma_attr_align = 246 pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE; 247 248 /* 249 * The Rev C chip has the ability to do PIO to or from consistent 250 * memory anywhere in a 64 bit address space, but the firmware is 251 * not presently set up to do so. 
252 */ 253 pwp->iqp_dma_attr.dma_attr_addr_hi = 254 pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull; 255 256 for (i = 0; i < PMCS_NIQ; i++) { 257 if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr, 258 &pwp->iqp_acchdls[i], 259 &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 260 (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) { 261 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 262 "Failed to setup DMA for iqp[%d]", i); 263 return (-1); 264 } 265 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 266 } 267 268 for (i = 0; i < PMCS_NOQ; i++) { 269 if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr, 270 &pwp->oqp_acchdls[i], 271 &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 272 (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) { 273 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 274 "Failed to setup DMA for oqp[%d]", i); 275 return (-1); 276 } 277 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 278 } 279 280 /* 281 * Install the IQ and OQ addresses (and null out the rest). 
282 */ 283 for (i = 0; i < pwp->max_iq; i++) { 284 pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i)); 285 if (i < PMCS_NIQ) { 286 if (i != PMCS_IQ_OTHER) { 287 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 288 pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16)); 289 } else { 290 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 291 (1 << 30) | pwp->ioq_depth | 292 (PMCS_QENTRY_SIZE << 16)); 293 } 294 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 295 DWORD1(pwp->iqaddr[i])); 296 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 297 DWORD0(pwp->iqaddr[i])); 298 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 299 DWORD1(pwp->ciaddr+IQ_OFFSET(i))); 300 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 301 DWORD0(pwp->ciaddr+IQ_OFFSET(i))); 302 } else { 303 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0); 304 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0); 305 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0); 306 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0); 307 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0); 308 } 309 } 310 311 for (i = 0; i < pwp->max_oq; i++) { 312 pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i)); 313 if (i < PMCS_NOQ) { 314 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth | 315 (PMCS_QENTRY_SIZE << 16) | OQIEX); 316 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 317 DWORD1(pwp->oqaddr[i])); 318 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 319 DWORD0(pwp->oqaddr[i])); 320 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 321 DWORD1(pwp->ciaddr+OQ_OFFSET(i))); 322 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 323 DWORD0(pwp->ciaddr+OQ_OFFSET(i))); 324 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 325 pwp->oqvec[i] << 24); 326 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 327 } else { 328 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0); 329 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0); 330 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0); 331 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0); 332 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0); 333 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0); 334 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 335 } 336 } 337 338 /* 339 * Set up logging, if defined. 
340 */ 341 if (pwp->fwlog) { 342 uint64_t logdma = pwp->fwaddr; 343 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma)); 344 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma)); 345 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1); 346 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog); 347 logdma += (PMCS_FWLOG_SIZE >> 1); 348 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma)); 349 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma)); 350 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1); 351 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog); 352 } 353 354 /* 355 * Interrupt vectors, outbound queues, and odb_auto_clear 356 * 357 * MSI/MSI-X: 358 * If we got 4 interrupt vectors, we'll assign one to each outbound 359 * queue as well as the fatal interrupt, and auto clear can be set 360 * for each. 361 * 362 * If we only got 2 vectors, one will be used for I/O completions 363 * and the other for the other two vectors. In this case, auto_ 364 * clear can only be set for I/Os, which is fine. The fatal 365 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which 366 * is not an interrupt vector. 367 * 368 * MSI/MSI-X/INT-X: 369 * If we only got 1 interrupt vector, auto_clear must be set to 0, 370 * and again the fatal interrupt will be mapped to the 371 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector). 
372 */ 373 374 switch (pwp->int_type) { 375 case PMCS_INT_MSIX: 376 case PMCS_INT_MSI: 377 switch (pwp->intr_cnt) { 378 case 1: 379 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 380 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 381 pwp->odb_auto_clear = 0; 382 break; 383 case 2: 384 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 385 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 386 pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) | 387 (1 << PMCS_MSIX_IODONE); 388 break; 389 case 4: 390 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 391 (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT)); 392 pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) | 393 (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) | 394 (1 << PMCS_MSIX_EVENTS); 395 break; 396 } 397 break; 398 399 case PMCS_INT_FIXED: 400 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 401 PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 402 pwp->odb_auto_clear = 0; 403 break; 404 } 405 406 /* 407 * If the open retry interval is non-zero, set it. 408 */ 409 if (pwp->open_retry_interval != 0) { 410 int phynum; 411 412 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 413 "%s: Setting open retry interval to %d usecs", __func__, 414 pwp->open_retry_interval); 415 for (phynum = 0; phynum < pwp->nphy; phynum ++) { 416 pmcs_wr_gsm_reg(pwp, OPEN_RETRY_INTERVAL(phynum), 417 pwp->open_retry_interval); 418 } 419 } 420 421 /* 422 * Enable Interrupt Reassertion 423 * Default Delay 1000us 424 */ 425 ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR); 426 if ((ferr & PMCS_MPI_IRAE) == 0) { 427 ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK); 428 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE); 429 } 430 431 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear); 432 pwp->mpi_table_setup = 1; 433 return (0); 434 } 435 436 /* 437 * Start the Message Passing protocol with the PMC chip. 
 */

/*
 * pmcs_start_mpi
 *
 * Ring the inbound doorbell to request MPI initialization, then poll for
 * up to ~1 second (1000 x 1000us) for the firmware to acknowledge by
 * clearing the doorbell bit.  After an additional settle delay, verify
 * via the General Status Table that the firmware reached the INIT state.
 *
 * Returns 0 on success, -1 if the doorbell never cleared or the INIT
 * state was not reached.
 */
int
pmcs_start_mpi(pmcs_hw_t *pwp)
{
	int i;

	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI);
	for (i = 0; i < 1000; i++) {
		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
		    PMCS_MSGU_IBDB_MPIINI) == 0) {
			break;
		}
		drv_usecwait(1000);
	}
	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) {
		return (-1);
	}
	/* Give the firmware time to settle before checking the MPI state. */
	drv_usecwait(500000);

	/*
	 * Check to make sure we got to INIT state.
	 */
	if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) !=
	    PMCS_MPI_STATE_INIT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__,
		    pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE),
		    pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
		return (-1);
	}
	return (0);
}

/*
 * Stop the Message Passing protocol with the PMC chip.
 *
 * Zeroes out every inbound and outbound queue configuration table entry,
 * clears the fatal-error interrupt setup, then rings the doorbell to
 * terminate MPI and polls up to ~2 seconds for the firmware to
 * acknowledge.  Returns 0 on success, -1 if the termination request
 * never completed.
 */
int
pmcs_stop_mpi(pmcs_hw_t *pwp)
{
	int i;

	for (i = 0; i < pwp->max_iq; i++) {
		pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
	}
	for (i = 0; i < pwp->max_oq; i++) {
		pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
	}
	pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU);
	for (i = 0; i < 2000; i++) {
		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
		    PMCS_MSGU_IBDB_MPICTU) == 0) {
			break;
		}
		drv_usecwait(1000);
	}
	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: MPI stop failed", __func__);
		return (-1);
	}
	return (0);
}

/*
 * Do a sequence of ECHO messages to test for MPI functionality,
 * all inbound and outbound queue functionality and interrupts.
 *
 * The measured round-trip latency is also used at the end to seed the
 * I/O interrupt coalescing parameters.  Returns 0 on success, -1 if
 * resources could not be obtained or an echo timed out.
 */
int
pmcs_echo_test(pmcs_hw_t *pwp)
{
	echo_test_t fred;
	struct pmcwork *pwrk;
	uint32_t *msg, count;
	int iqe = 0, iqo = 0, result, rval = 0;
	int iterations;
	hrtime_t echo_start, echo_end, echo_total;

	ASSERT(pwp->max_cmd > 0);

	/*
	 * We want iterations to be max_cmd * 3 to ensure that we run the
	 * echo test enough times to iterate through every inbound queue
	 * at least twice.
	 */
	iterations = pwp->max_cmd * 3;

	echo_total = 0;
	count = 0;

	/*
	 * NOTE(review): count is not incremented in this loop body; the echo
	 * payload carries &count (fred.ptr), so it is presumably advanced by
	 * the completion path — confirm against the ECHO completion handler.
	 */
	while (count < iterations) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nowrk, __func__);
			rval = -1;
			break;
		}

		mutex_enter(&pwp->iqp_lock[iqe]);
		msg = GET_IQ_ENTRY(pwp, iqe);
		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[iqe]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nomsg, __func__);
			rval = -1;
			break;
		}

		bzero(msg, PMCS_QENTRY_SIZE);

		if (iqe == PMCS_IQ_OTHER) {
			/* This is on the high priority queue */
			msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO));
		} else {
			msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO));
		}
		msg[1] = LE_32(pwrk->htag);
		fred.signature = 0xdeadbeef;
		fred.count = count;
		fred.ptr = &count;
		(void) memcpy(&msg[2], &fred, sizeof (fred));
		pwrk->state = PMCS_WORK_STATE_ONCHIP;

		INC_IQ_ENTRY(pwp, iqe);

		echo_start = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__start,
		    hrtime_t, echo_start, uint32_t, pwrk->htag);

		/* Round-robin across all inbound and outbound queues. */
		if (++iqe == PMCS_NIQ) {
			iqe = 0;
		}
		if (++iqo == PMCS_NOQ) {
			iqo = 0;
		}

		WAIT_FOR(pwrk, 250, result);
		pmcs_pwork(pwp, pwrk);

		echo_end = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__end,
		    hrtime_t, echo_end, int, result);
		echo_total += (echo_end - echo_start);

		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: command timed out on echo test #%d",
			    __func__, count);
			rval = -1;
			break;
		}
	}

	/*
	 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to
	 * remove the overhead of things like the delay in getting signaled
	 * for completion.
	 */
	if (echo_total != 0) {
		pwp->io_intr_coal.intr_latency =
		    (echo_total / iterations) / 2;
		pwp->io_intr_coal.intr_threshold =
		    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
		    pwp->io_intr_coal.intr_latency);
	}

	return (rval);
}

/*
 * Start the (real) phys
 *
 * Sends a PHY_START IOMB for root phy 'phynum' carrying the link mode,
 * speed bits and a SAS IDENTIFY address frame, then waits up to 1000ms
 * for the command to complete.  On success the phy is marked started in
 * pwp->phys_started.
 *
 * Returns 0 even when the command times out (the timeout is only
 * logged); -1 only when a work structure or inbound queue entry could
 * not be obtained.
 */
int
pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed)
{
	int result;
	uint32_t *msg;
	struct pmcwork *pwrk;
	pmcs_phy_t *pptr;
	sas_identify_af_t sap;

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys + phynum;
	/*
	 * NOTE(review): pptr is root_phys + phynum, so this can only be NULL
	 * when root_phys itself is NULL — the check does not catch a bad
	 * phynum.
	 */
	if (pptr == NULL) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: cannot find port %d", __func__, phynum);
		return (0);
	}

	/* Take the phy lock before dropping the softstate lock. */
	pmcs_lock_phy(pptr);
	mutex_exit(&pwp->lock);

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (-1);
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(linkmode | speed | phynum);
	bzero(&sap, sizeof (sap));
	sap.device_type = SAS_IF_DTYPE_ENDPOINT;
	sap.ssp_ini_port = 1;

	/* Each phy gets its own WWN when ports are separate. */
	if (pwp->separate_ports) {
		pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address);
	} else {
		pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address);
	}

	ASSERT(phynum < SAS2_PHYNUM_MAX);
	sap.phy_identifier = phynum & SAS2_PHYNUM_MASK;
	(void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Record programmed and hardware link-rate bounds on the phy. */
	pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1);
	pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1);
	pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE;
	pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE;

	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
	} else {
		mutex_enter(&pwp->lock);
		pwp->phys_started |= (1 << phynum);
		mutex_exit(&pwp->lock);
	}

	return (0);
}

/*
 * pmcs_start_phys
 *
 * Start every root phy that is not administratively blocked
 * (phyid_block_mask) and clear its diagnostic error counters, then
 * capture a firmware/system timestamp pair for later log correlation.
 *
 * Returns -1 if any phy fails to start; counter-reset and timestamp
 * failures are only logged and the function still returns 0.
 */
int
pmcs_start_phys(pmcs_hw_t *pwp)
{
	int i, rval;

	for (i = 0; i < pwp->nphy; i++) {
		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
			if (pmcs_start_phy(pwp, i,
			    (pwp->phymode << PHY_MODE_SHIFT),
			    pwp->physpeed << PHY_LINK_SHIFT)) {
				return (-1);
			}
			if (pmcs_clear_diag_counters(pwp, i)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: failed to reset counters on PHY (%d)",
				    __func__, i);
			}
		}
	}

	rval = pmcs_get_time_stamp(pwp, &pwp->fw_timestamp, &pwp->hrtimestamp);
	if (rval) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Failed to obtain firmware timestamp", __func__);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Firmware timestamp: 0x%" PRIx64, pwp->fw_timestamp);
	}

	return (0);
}

/*
 * Called with PHY locked
 */
int
pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t
*pptr, uint8_t type)
{
	uint32_t *msg;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	const char *mbar;
	uint32_t amt;
	uint32_t pdevid;
	uint32_t stsoff;
	uint32_t status;
	int result, level, phynum;
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	uint32_t htag;

	ASSERT(mutex_owned(&pptr->phy_lock));

	bzero(iomb, PMCS_QENTRY_SIZE);
	phynum = pptr->phynum;
	level = pptr->level;
	if (level > 0) {
		/* Expander-attached: the parent expander handles the SMP. */
		pdevid = pptr->parent->device_id;
	} else if ((level == 0) && (pptr->dtype == EXPANDER)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Not resetting HBA PHY @ %s", __func__, pptr->path);
		return (0);
	}

	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		return (0);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	/* Completion data is deposited into our local iomb. */
	pwrk->arg = iomb;

	/*
	 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
	 * function to do either a link reset or hard reset. If level == 0,
	 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
	 * root (local) PHY
	 */
	if (level) {
		stsoff = 2;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SMP_REQUEST));
		iomb[1] = LE_32(pwrk->htag);
		iomb[2] = LE_32(pdevid);
		iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
		/*
		 * Send SMP PHY CONTROL/HARD or LINK RESET
		 */
		iomb[4] = BE_32(0x40910000);
		iomb[5] = 0;

		if (type == PMCS_PHYOP_HARD_RESET) {
			mbar = "SMP PHY CONTROL/HARD RESET";
			iomb[6] = BE_32((phynum << 16) |
			    (PMCS_PHYOP_HARD_RESET << 8));
		} else {
			mbar = "SMP PHY CONTROL/LINK RESET";
			iomb[6] = BE_32((phynum << 16) |
			    (PMCS_PHYOP_LINK_RESET << 8));
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s for phy 0x%x",
		    __func__, mbar, pptr->parent->path, pptr->phynum);
		amt = 7;
	} else {
		/*
		 * Unlike most other Outbound messages, status for
		 * a local phy operation is in DWORD 3.
		 */
		stsoff = 3;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_LOCAL_PHY_CONTROL));
		iomb[1] = LE_32(pwrk->htag);
		if (type == PMCS_PHYOP_LINK_RESET) {
			mbar = "LOCAL PHY LINK RESET";
			iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
		} else {
			mbar = "LOCAL PHY HARD RESET";
			iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s", __func__, mbar, pptr->path);
		amt = 3;
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	COPY_MESSAGE(msg, iomb, amt);
	htag = pwrk->htag;

	/*
	 * Hold the iport and serialize SMP activity on it for the duration
	 * of the command; the phy lock is dropped across the wait and
	 * reacquired afterward.
	 */
	pmcs_hold_iport(pptr->iport);
	iport = pptr->iport;
	pmcs_smp_acquire(iport);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);

		/* Timed out: try to abort the outstanding SMP command. */
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP abort for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		pmcs_smp_release(iport);
		pmcs_rele_iport(iport);
		return (EIO);
	}
	pmcs_smp_release(iport);
	pmcs_rele_iport(iport);
	/* Status DWORD index differs between SMP and local phy control. */
	status = LE_32(iomb[stsoff]);

	if (status != PMCOUT_STATUS_OK) {
		char buf[32];
		const char *es = pmcs_status_str(status);
		if (es == NULL) {
			(void) snprintf(buf, sizeof (buf), "Status 0x%x",
			    status);
			es = buf;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: %s action returned %s for %s", __func__, mbar, es,
		    pptr->path);
		return (status);
	}

	return (0);
}

/*
 * Stop the (real) phys. No PHY or softstate locks are required as this only
 * happens during detach.
 *
 * If the phy was started, a PHY_STOP IOMB is issued and waited on (up to
 * 1000ms); the phy is then marked not-started and unconfigured regardless
 * of the command's outcome.
 */
void
pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
{
	int result;
	pmcs_phy_t *pptr;
	uint32_t *msg;
	struct pmcwork *pwrk;

	pptr = pwp->root_phys + phynum;
	if (pptr == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: unable to find port %d", __func__, phynum);
		return;
	}

	if (pwp->phys_started & (1 << phynum)) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nowrk, __func__);
			return;
		}

		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nomsg, __func__);
			return;
		}

		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
		msg[1] = LE_32(pwrk->htag);
		msg[2] = LE_32(phynum);
		pwrk->state = PMCS_WORK_STATE_ONCHIP;
		/*
		 * Make this unconfigured now.
		 */
		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
		WAIT_FOR(pwrk, 1000, result);
		pmcs_pwork(pwp, pwrk);
		if (result) {
			/* Timeout is logged but not treated as fatal. */
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    pptr, NULL, pmcs_timeo, __func__);
		}

		pwp->phys_started &= ~(1 << phynum);
	}

	pptr->configured = 0;
}

/*
 * No locks should be required as this is only called during detach
 */
void
pmcs_stop_phys(pmcs_hw_t *pwp)
{
	int i;
	for (i = 0; i < pwp->nphy; i++) {
		/* Skip phys that were administratively blocked. */
		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
			pmcs_stop_phy(pwp, i);
		}
	}
}

/*
 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
 * ERR_CNT_RESET: return status of cmd
 * DIAG_REPORT_GET: return value of the counter
 *
 * Issues a SAS_DIAG_EXECUTE IOMB for (cmd, cmd_desc) against root phy
 * 'phynum' and waits up to 1000ms for completion.  Returns DDI_FAILURE
 * when resources cannot be obtained, the command times out, or (for
 * DIAG_REPORT_GET) a non-zero status comes back.
 */
int
pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc,
    uint8_t phynum)
{
	uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (DDI_FAILURE);
	}
	/* Completion copies the outbound IOMB back into msg. */
	pwrk->arg = msg;
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE));
	msg[1] = LE_32(htag);
	msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) |
	    (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (DDI_FAILURE);
	}
	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (DDI_FAILURE);
	}

	status = LE_32(msg[3]);

	/* Return for counter reset */
	if (cmd == PMCS_ERR_CNT_RESET)
		return (status);

	/* Return for counter value */
	if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed, status (0x%x)", __func__, status);
		return (DDI_FAILURE);
	}
	return (LE_32(msg[4]));
}

/* Get the current value of the counter for desc on phynum and return it. */
int
pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum)
{
	return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum));
}

/*
 * Clear all of the counters for phynum. Returns the status of the command.
 * All four diagnostic counters are reset; the first failure short-circuits
 * with DDI_FAILURE.
 */
int
pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
{
	uint32_t cmd = PMCS_ERR_CNT_RESET;
	uint32_t cmd_desc;

	cmd_desc = PMCS_INVALID_DWORD_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	cmd_desc = PMCS_DISPARITY_ERR_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	cmd_desc = PMCS_LOST_DWORD_SYNC_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	cmd_desc = PMCS_RESET_FAILED_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * Get firmware timestamp
 *
 * Issues a GET_TIME_STAMP IOMB and, on completion, captures the firmware
 * timestamp into *fw_ts together with a matching system hrtime in
 * *sys_hr_ts.  Returns 0 on success, -1 on resource exhaustion or
 * timeout.
 */
static int
pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *fw_ts, hrtime_t *sys_hr_ts)
{
	uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (-1);
	}
	pwrk->arg = msg;
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
	msg[1] = LE_32(pwrk->htag);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	COPY_MESSAGE(ptr, msg, 2);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (-1);
	}

	/*
	 * NOTE(review): pmcs_trace_lock is held while the firmware/system
	 * timestamps are captured — presumably to keep them consistent with
	 * the trace log's own timestamping; confirm against the trace code.
	 */
	mutex_enter(&pmcs_trace_lock);
	*sys_hr_ts = gethrtime();
	gethrestime(&pwp->sys_timestamp);
1095 *fw_ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32); 1096 mutex_exit(&pmcs_trace_lock); 1097 return (0); 1098 } 1099 1100 /* 1101 * Dump all pertinent registers 1102 */ 1103 1104 void 1105 pmcs_register_dump(pmcs_hw_t *pwp) 1106 { 1107 int i; 1108 uint32_t val; 1109 1110 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start", 1111 ddi_get_instance(pwp->dip)); 1112 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 1113 "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x", 1114 pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB), 1115 pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK), 1116 pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR)); 1117 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x", 1118 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0)); 1119 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x", 1120 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1)); 1121 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x", 1122 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2)); 1123 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x", 1124 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3)); 1125 for (i = 0; i < PMCS_NIQ; i++) { 1126 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u", 1127 i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i)); 1128 } 1129 for (i = 0; i < PMCS_NOQ; i++) { 1130 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u", 1131 i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i)); 1132 } 1133 val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE); 1134 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 1135 "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)", 1136 val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4, 1137 PMCS_HMI_ERR(val)); 1138 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x", 1139 pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0)); 1140 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x", 1141 pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1)); 1142 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE MSGU TICK: 0x%08x", 1143 
pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK)); 1144 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x", 1145 pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK)); 1146 for (i = 0; i < pwp->nphy; i++) { 1147 uint32_t rerrf, pinfo, started = 0, link = 0; 1148 pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i)); 1149 if (pinfo & 1) { 1150 started = 1; 1151 link = pinfo & 2; 1152 } 1153 rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i)); 1154 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 1155 "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x", 1156 i, started, link, rerrf); 1157 } 1158 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end", 1159 ddi_get_instance(pwp->dip)); 1160 } 1161 1162 /* 1163 * Handle SATA Abort and other error processing 1164 */ 1165 int 1166 pmcs_abort_handler(pmcs_hw_t *pwp) 1167 { 1168 pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND]; 1169 pmcs_xscsi_t *tgt; 1170 int r, level = 0; 1171 1172 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__); 1173 1174 mutex_enter(&pwp->lock); 1175 pptr = pwp->root_phys; 1176 mutex_exit(&pwp->lock); 1177 1178 while (pptr) { 1179 /* 1180 * XXX: Need to make sure this doesn't happen 1181 * XXX: when non-NCQ commands are running. 1182 */ 1183 pmcs_lock_phy(pptr); 1184 if (pptr->need_rl_ext) { 1185 ASSERT(pptr->dtype == SATA); 1186 if (pmcs_acquire_scratch(pwp, B_FALSE)) { 1187 goto next_phy; 1188 } 1189 r = pmcs_sata_abort_ncq(pwp, pptr); 1190 pmcs_release_scratch(pwp); 1191 if (r == ENOMEM) { 1192 goto next_phy; 1193 } 1194 if (r) { 1195 r = pmcs_reset_phy(pwp, pptr, 1196 PMCS_PHYOP_LINK_RESET); 1197 if (r == ENOMEM) { 1198 goto next_phy; 1199 } 1200 /* what if other failures happened? 
*/ 1201 pptr->abort_pending = 1; 1202 pptr->abort_sent = 0; 1203 } 1204 } 1205 if (pptr->abort_pending == 0 || pptr->abort_sent) { 1206 goto next_phy; 1207 } 1208 pptr->abort_pending = 0; 1209 if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) { 1210 pptr->abort_pending = 1; 1211 goto next_phy; 1212 } 1213 pptr->abort_sent = 1; 1214 1215 /* 1216 * If the iport is no longer active, flush the queues 1217 */ 1218 if ((pptr->iport == NULL) || 1219 (pptr->iport->ua_state != UA_ACTIVE)) { 1220 tgt = pptr->target; 1221 if (tgt != NULL) { 1222 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 1223 "%s: Clearing target 0x%p, inactive iport", 1224 __func__, (void *) tgt); 1225 mutex_enter(&tgt->statlock); 1226 pmcs_clear_xp(pwp, tgt); 1227 mutex_exit(&tgt->statlock); 1228 } 1229 } 1230 1231 next_phy: 1232 if (pptr->children) { 1233 pnext = pptr->children; 1234 pnext_uplevel[level++] = pptr->sibling; 1235 } else { 1236 pnext = pptr->sibling; 1237 while ((pnext == NULL) && (level > 0)) { 1238 pnext = pnext_uplevel[--level]; 1239 } 1240 } 1241 1242 pmcs_unlock_phy(pptr); 1243 pptr = pnext; 1244 } 1245 1246 return (0); 1247 } 1248 1249 /* 1250 * Register a device (get a device handle for it). 1251 * Called with PHY lock held. 
1252 */ 1253 int 1254 pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 1255 { 1256 struct pmcwork *pwrk; 1257 int result = 0; 1258 uint32_t *msg; 1259 uint32_t tmp, status; 1260 uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2]; 1261 1262 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1263 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1264 1265 if (msg == NULL || 1266 (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) { 1267 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1268 result = ENOMEM; 1269 goto out; 1270 } 1271 1272 pwrk->arg = iomb; 1273 pwrk->dtype = pptr->dtype; 1274 1275 msg[1] = LE_32(pwrk->htag); 1276 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE)); 1277 tmp = PMCS_DEVREG_TLR | 1278 (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT); 1279 if (IS_ROOT_PHY(pptr)) { 1280 msg[2] = LE_32(pptr->portid | 1281 (pptr->phynum << PMCS_PHYID_SHIFT)); 1282 } else { 1283 msg[2] = LE_32(pptr->portid); 1284 } 1285 if (pptr->dtype == SATA) { 1286 if (IS_ROOT_PHY(pptr)) { 1287 tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT; 1288 } else { 1289 tmp |= PMCS_DEVREG_TYPE_SATA; 1290 } 1291 } else { 1292 tmp |= PMCS_DEVREG_TYPE_SAS; 1293 } 1294 msg[3] = LE_32(tmp); 1295 msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT); 1296 (void) memcpy(&msg[5], pptr->sas_address, 8); 1297 1298 CLEAN_MESSAGE(msg, 7); 1299 pwrk->state = PMCS_WORK_STATE_ONCHIP; 1300 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1301 1302 pmcs_unlock_phy(pptr); 1303 WAIT_FOR(pwrk, 250, result); 1304 pmcs_pwork(pwp, pwrk); 1305 pmcs_lock_phy(pptr); 1306 1307 if (result) { 1308 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 1309 result = ETIMEDOUT; 1310 goto out; 1311 } 1312 status = LE_32(iomb[2]); 1313 tmp = LE_32(iomb[3]); 1314 switch (status) { 1315 case PMCS_DEVREG_OK: 1316 case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED: 1317 case PMCS_DEVREG_PHY_ALREADY_REGISTERED: 1318 if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) { 1319 result = EEXIST; 1320 goto out; 1321 } else if (status != 
PMCS_DEVREG_OK) { 1322 if (tmp == 0xffffffff) { /* F/W bug */ 1323 pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL, 1324 "%s: phy %s already has bogus devid 0x%x", 1325 __func__, pptr->path, tmp); 1326 result = EIO; 1327 goto out; 1328 } else { 1329 pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL, 1330 "%s: phy %s already has a device id 0x%x", 1331 __func__, pptr->path, tmp); 1332 } 1333 } 1334 break; 1335 default: 1336 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1337 "%s: status 0x%x when trying to register device %s", 1338 __func__, status, pptr->path); 1339 result = EIO; 1340 goto out; 1341 } 1342 pptr->device_id = tmp; 1343 pptr->valid_device_id = 1; 1344 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT 1345 " registered with device_id 0x%x (portid %d)", pptr->path, 1346 SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid); 1347 out: 1348 return (result); 1349 } 1350 1351 /* 1352 * Deregister a device (remove a device handle). 1353 * Called with PHY locked. 1354 */ 1355 void 1356 pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 1357 { 1358 struct pmcwork *pwrk; 1359 uint32_t msg[PMCS_MSG_SIZE], *ptr, status; 1360 uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2]; 1361 int result; 1362 1363 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 1364 if (pwrk == NULL) { 1365 return; 1366 } 1367 1368 pwrk->arg = iomb; 1369 pwrk->dtype = pptr->dtype; 1370 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1371 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1372 if (ptr == NULL) { 1373 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1374 pmcs_pwork(pwp, pwrk); 1375 return; 1376 } 1377 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 1378 PMCIN_DEREGISTER_DEVICE_HANDLE)); 1379 msg[1] = LE_32(pwrk->htag); 1380 msg[2] = LE_32(pptr->device_id); 1381 pwrk->state = PMCS_WORK_STATE_ONCHIP; 1382 COPY_MESSAGE(ptr, msg, 3); 1383 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1384 1385 pmcs_unlock_phy(pptr); 1386 WAIT_FOR(pwrk, 250, result); 1387 pmcs_pwork(pwp, pwrk); 1388 pmcs_lock_phy(pptr); 1389 1390 if 
(result) { 1391 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 1392 return; 1393 } 1394 status = LE_32(iomb[2]); 1395 if (status != PMCOUT_STATUS_OK) { 1396 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1397 "%s: status 0x%x when trying to deregister device %s", 1398 __func__, status, pptr->path); 1399 } else { 1400 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1401 "%s: device %s deregistered", __func__, pptr->path); 1402 } 1403 1404 pptr->device_id = PMCS_INVALID_DEVICE_ID; 1405 pptr->configured = 0; 1406 pptr->deregister_wait = 0; 1407 pptr->valid_device_id = 0; 1408 } 1409 1410 /* 1411 * Deregister all registered devices. 1412 */ 1413 void 1414 pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 1415 { 1416 /* 1417 * Start at the maximum level and walk back to level 0. This only 1418 * gets done during detach after all threads and timers have been 1419 * destroyed. 1420 */ 1421 while (phyp) { 1422 if (phyp->children) { 1423 pmcs_deregister_devices(pwp, phyp->children); 1424 } 1425 pmcs_lock_phy(phyp); 1426 if (phyp->valid_device_id) { 1427 pmcs_deregister_device(pwp, phyp); 1428 } 1429 pmcs_unlock_phy(phyp); 1430 phyp = phyp->sibling; 1431 } 1432 } 1433 1434 /* 1435 * Perform a 'soft' reset on the PMC chip 1436 */ 1437 int 1438 pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart) 1439 { 1440 uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode; 1441 pmcs_phy_t *pptr; 1442 char *msg = NULL; 1443 int i; 1444 1445 /* 1446 * Disable interrupts 1447 */ 1448 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1449 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1450 1451 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__); 1452 1453 if (pwp->locks_initted) { 1454 mutex_enter(&pwp->lock); 1455 } 1456 pwp->blocked = 1; 1457 1458 /* 1459 * Clear our softstate copies of the MSGU and IOP heartbeats. 
1460 */ 1461 pwp->last_msgu_tick = pwp->last_iop_tick = 0; 1462 1463 /* 1464 * Step 1 1465 */ 1466 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2); 1467 if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) { 1468 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE); 1469 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE); 1470 for (i = 0; i < 100; i++) { 1471 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1472 PMCS_MSGU_HOST_SOFT_RESET_READY; 1473 if (s2) { 1474 break; 1475 } 1476 drv_usecwait(10000); 1477 } 1478 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1479 PMCS_MSGU_HOST_SOFT_RESET_READY; 1480 if (s2 == 0) { 1481 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1482 "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came " 1483 "ready", __func__); 1484 pmcs_register_dump(pwp); 1485 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1486 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 || 1487 (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1488 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) { 1489 pwp->state = STATE_DEAD; 1490 pwp->blocked = 0; 1491 if (pwp->locks_initted) { 1492 mutex_exit(&pwp->lock); 1493 } 1494 return (-1); 1495 } 1496 } 1497 } 1498 1499 /* 1500 * Step 2 1501 */ 1502 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0); 1503 drv_usecwait(10); 1504 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0); 1505 drv_usecwait(10); 1506 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0); 1507 drv_usecwait(10); 1508 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT, 1509 pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT)); 1510 drv_usecwait(10); 1511 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0); 1512 drv_usecwait(10); 1513 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT, 1514 pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT)); 1515 drv_usecwait(10); 1516 1517 sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1518 PMCS_MSGU_AAP_SFR_PROGRESS; 1519 sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS; 1520 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 " 1521 "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0), 1522 
HST_SFT_RESET_SIG); 1523 pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG); 1524 1525 /* 1526 * Step 3 1527 */ 1528 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET); 1529 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1530 gsm & ~PMCS_SOFT_RESET_BITS); 1531 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS); 1532 1533 /* 1534 * Step 4 1535 */ 1536 rapchk = pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN); 1537 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1538 "%08x -> %08x", rapchk, 0); 1539 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0); 1540 wapchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN); 1541 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1542 "%08x -> %08x", wapchk, 0); 1543 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0); 1544 wdpchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN); 1545 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1546 "%08x -> %08x", wdpchk, 0); 1547 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0); 1548 1549 /* 1550 * Step 5 1551 */ 1552 drv_usecwait(100); 1553 1554 /* 1555 * Step 5.5 (Temporary workaround for 1.07.xx Beta) 1556 */ 1557 tsmode = pmcs_rd_gsm_reg(pwp, 0, PMCS_GPIO_TRISTATE_MODE_ADDR); 1558 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x", 1559 tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1)); 1560 pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR, 1561 tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1)); 1562 drv_usecwait(10); 1563 1564 /* 1565 * Step 6 1566 */ 1567 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1568 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1569 spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1570 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1571 spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1572 drv_usecwait(10); 1573 1574 /* 1575 * Step 7 1576 */ 1577 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1578 
pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1579 spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1580 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1581 1582 /* 1583 * Step 8 1584 */ 1585 drv_usecwait(100); 1586 1587 /* 1588 * Step 9 1589 */ 1590 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1591 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1592 spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1593 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1594 1595 /* 1596 * Step 10 1597 */ 1598 drv_usecwait(100); 1599 1600 /* 1601 * Step 11 1602 */ 1603 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET); 1604 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1605 gsm | PMCS_SOFT_RESET_BITS); 1606 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS); 1607 drv_usecwait(10); 1608 1609 /* 1610 * Step 12 1611 */ 1612 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1613 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN), 1614 rapchk); 1615 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk); 1616 drv_usecwait(10); 1617 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1618 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN), 1619 wapchk); 1620 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk); 1621 drv_usecwait(10); 1622 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1623 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN), 1624 wapchk); 1625 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk); 1626 drv_usecwait(10); 1627 1628 /* 1629 * Step 13 1630 */ 1631 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1632 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1633 spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1634 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1635 spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1636 1637 /* 1638 * Step 14 1639 */ 1640 
drv_usecwait(100); 1641 1642 /* 1643 * Step 15 1644 */ 1645 for (spc = 0, i = 0; i < 1000; i++) { 1646 drv_usecwait(1000); 1647 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 1648 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) { 1649 break; 1650 } 1651 } 1652 1653 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) { 1654 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1655 "SFR didn't toggle (sfr 0x%x)", spc); 1656 pwp->state = STATE_DEAD; 1657 pwp->blocked = 0; 1658 if (pwp->locks_initted) { 1659 mutex_exit(&pwp->lock); 1660 } 1661 return (-1); 1662 } 1663 1664 /* 1665 * Step 16 1666 */ 1667 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1668 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1669 1670 /* 1671 * Wait for up to 5 seconds for AAP state to come either ready or error. 1672 */ 1673 for (i = 0; i < 50; i++) { 1674 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1675 PMCS_MSGU_AAP_STATE_MASK; 1676 if (spc == PMCS_MSGU_AAP_STATE_ERROR || 1677 spc == PMCS_MSGU_AAP_STATE_READY) { 1678 break; 1679 } 1680 drv_usecwait(100000); 1681 } 1682 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 1683 if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 1684 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1685 "soft reset failed (state 0x%x)", spc); 1686 pwp->state = STATE_DEAD; 1687 pwp->blocked = 0; 1688 if (pwp->locks_initted) { 1689 mutex_exit(&pwp->lock); 1690 } 1691 return (-1); 1692 } 1693 1694 /* Clear the firmware log */ 1695 if (pwp->fwlogp) { 1696 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 1697 } 1698 1699 /* Reset our queue indices and entries */ 1700 bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi)); 1701 bzero(pwp->last_iqci, sizeof (pwp->last_iqci)); 1702 bzero(pwp->last_htag, sizeof (pwp->last_htag)); 1703 for (i = 0; i < PMCS_NIQ; i++) { 1704 if (pwp->iqp[i]) { 1705 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 1706 pmcs_wr_iqpi(pwp, i, 0); 1707 pmcs_wr_iqci(pwp, i, 0); 1708 } 1709 } 1710 for (i = 0; i < PMCS_NOQ; i++) { 1711 
if (pwp->oqp[i]) { 1712 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 1713 pmcs_wr_oqpi(pwp, i, 0); 1714 pmcs_wr_oqci(pwp, i, 0); 1715 } 1716 1717 } 1718 1719 if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING || 1720 pwp->state == STATE_PROBING || pwp->locks_initted == 0) { 1721 pwp->blocked = 0; 1722 if (pwp->locks_initted) { 1723 mutex_exit(&pwp->lock); 1724 } 1725 return (0); 1726 } 1727 1728 /* 1729 * Return at this point if we dont need to startup. 1730 */ 1731 if (no_restart) { 1732 return (0); 1733 } 1734 1735 ASSERT(pwp->locks_initted != 0); 1736 1737 /* 1738 * Flush the target queues and clear each target's PHY 1739 */ 1740 if (pwp->targets) { 1741 for (i = 0; i < pwp->max_dev; i++) { 1742 pmcs_xscsi_t *xp = pwp->targets[i]; 1743 1744 if (xp == NULL) { 1745 continue; 1746 } 1747 1748 mutex_enter(&xp->statlock); 1749 pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES); 1750 xp->phy = NULL; 1751 mutex_exit(&xp->statlock); 1752 } 1753 } 1754 1755 /* 1756 * Zero out the ports list, free non root phys, clear root phys 1757 */ 1758 bzero(pwp->ports, sizeof (pwp->ports)); 1759 pmcs_free_all_phys(pwp, pwp->root_phys); 1760 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 1761 pmcs_lock_phy(pptr); 1762 pmcs_clear_phy(pwp, pptr); 1763 pptr->target = NULL; 1764 pmcs_unlock_phy(pptr); 1765 } 1766 1767 /* 1768 * Restore Interrupt Mask 1769 */ 1770 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask); 1771 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1772 1773 pwp->mpi_table_setup = 0; 1774 mutex_exit(&pwp->lock); 1775 1776 /* 1777 * Set up MPI again. 
1778 */ 1779 if (pmcs_setup(pwp)) { 1780 msg = "unable to setup MPI tables again"; 1781 goto fail_restart; 1782 } 1783 pmcs_report_fwversion(pwp); 1784 1785 /* 1786 * Restart MPI 1787 */ 1788 if (pmcs_start_mpi(pwp)) { 1789 msg = "unable to restart MPI again"; 1790 goto fail_restart; 1791 } 1792 1793 mutex_enter(&pwp->lock); 1794 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1795 mutex_exit(&pwp->lock); 1796 1797 /* 1798 * Run any completions 1799 */ 1800 PMCS_CQ_RUN(pwp); 1801 1802 /* 1803 * Delay 1804 */ 1805 drv_usecwait(1000000); 1806 return (0); 1807 1808 fail_restart: 1809 mutex_enter(&pwp->lock); 1810 pwp->state = STATE_DEAD; 1811 mutex_exit(&pwp->lock); 1812 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 1813 "%s: Failed: %s", __func__, msg); 1814 return (-1); 1815 } 1816 1817 1818 /* 1819 * Perform a 'hot' reset, which will soft reset the chip and 1820 * restore the state back to pre-reset context. Called with pwp 1821 * lock held. 1822 */ 1823 int 1824 pmcs_hot_reset(pmcs_hw_t *pwp) 1825 { 1826 pmcs_iport_t *iport; 1827 1828 ASSERT(mutex_owned(&pwp->lock)); 1829 pwp->state = STATE_IN_RESET; 1830 1831 /* 1832 * For any iports on this HBA, report empty target sets and 1833 * then tear them down. 
1834 */ 1835 rw_enter(&pwp->iports_lock, RW_READER); 1836 for (iport = list_head(&pwp->iports); iport != NULL; 1837 iport = list_next(&pwp->iports, iport)) { 1838 mutex_enter(&iport->lock); 1839 (void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap); 1840 (void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0); 1841 pmcs_iport_teardown_phys(iport); 1842 mutex_exit(&iport->lock); 1843 } 1844 rw_exit(&pwp->iports_lock); 1845 1846 /* Grab a register dump, in the event that reset fails */ 1847 pmcs_register_dump_int(pwp); 1848 mutex_exit(&pwp->lock); 1849 1850 /* Ensure discovery is not running before we proceed */ 1851 mutex_enter(&pwp->config_lock); 1852 while (pwp->configuring) { 1853 cv_wait(&pwp->config_cv, &pwp->config_lock); 1854 } 1855 mutex_exit(&pwp->config_lock); 1856 1857 /* Issue soft reset and clean up related softstate */ 1858 if (pmcs_soft_reset(pwp, B_FALSE)) { 1859 /* 1860 * Disable interrupts, in case we got far enough along to 1861 * enable them, then fire off ereport and service impact. 1862 */ 1863 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1864 "%s: failed soft reset", __func__); 1865 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1866 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1867 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE); 1868 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 1869 mutex_enter(&pwp->lock); 1870 pwp->state = STATE_DEAD; 1871 return (DDI_FAILURE); 1872 } 1873 1874 mutex_enter(&pwp->lock); 1875 pwp->state = STATE_RUNNING; 1876 mutex_exit(&pwp->lock); 1877 1878 /* 1879 * Finally, restart the phys, which will bring the iports back 1880 * up and eventually result in discovery running. 
1881 */ 1882 if (pmcs_start_phys(pwp)) { 1883 /* We should be up and running now, so retry */ 1884 if (pmcs_start_phys(pwp)) { 1885 /* Apparently unable to restart PHYs, fail */ 1886 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1887 "%s: failed to restart PHYs after soft reset", 1888 __func__); 1889 mutex_enter(&pwp->lock); 1890 return (DDI_FAILURE); 1891 } 1892 } 1893 1894 mutex_enter(&pwp->lock); 1895 return (DDI_SUCCESS); 1896 } 1897 1898 /* 1899 * Reset a device or a logical unit. 1900 */ 1901 int 1902 pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun) 1903 { 1904 int rval = 0; 1905 1906 if (pptr == NULL) { 1907 return (ENXIO); 1908 } 1909 1910 pmcs_lock_phy(pptr); 1911 if (pptr->dtype == SAS) { 1912 /* 1913 * Some devices do not support SAS_I_T_NEXUS_RESET as 1914 * it is not a mandatory (in SAM4) task management 1915 * function, while LOGIC_UNIT_RESET is mandatory. 1916 * 1917 * The problem here is that we need to iterate over 1918 * all known LUNs to emulate the semantics of 1919 * "RESET_TARGET". 1920 * 1921 * XXX: FIX ME 1922 */ 1923 if (lun == (uint64_t)-1) { 1924 lun = 0; 1925 } 1926 rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun, 1927 NULL); 1928 } else if (pptr->dtype == SATA) { 1929 if (lun != 0ull) { 1930 pmcs_unlock_phy(pptr); 1931 return (EINVAL); 1932 } 1933 rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET); 1934 } else { 1935 pmcs_unlock_phy(pptr); 1936 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1937 "%s: cannot reset a SMP device yet (%s)", 1938 __func__, pptr->path); 1939 return (EINVAL); 1940 } 1941 1942 /* 1943 * Now harvest any commands killed by this action 1944 * by issuing an ABORT for all commands on this device. 1945 * 1946 * We do this even if the the tmf or reset fails (in case there 1947 * are any dead commands around to be harvested *anyway*). 1948 * We don't have to await for the abort to complete. 
1949 */ 1950 if (pmcs_abort(pwp, pptr, 0, 1, 0)) { 1951 pptr->abort_pending = 1; 1952 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 1953 } 1954 1955 pmcs_unlock_phy(pptr); 1956 return (rval); 1957 } 1958 1959 /* 1960 * Called with PHY locked. 1961 */ 1962 static int 1963 pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 1964 { 1965 if (pptr->valid_device_id == 0) { 1966 int result = pmcs_register_device(pwp, pptr); 1967 1968 /* 1969 * If we changed while registering, punt 1970 */ 1971 if (pptr->changed) { 1972 RESTART_DISCOVERY(pwp); 1973 return (-1); 1974 } 1975 1976 /* 1977 * If we had a failure to register, check against errors. 1978 * An ENOMEM error means we just retry (temp resource shortage). 1979 */ 1980 if (result == ENOMEM) { 1981 PHY_CHANGED(pwp, pptr); 1982 RESTART_DISCOVERY(pwp); 1983 return (-1); 1984 } 1985 1986 /* 1987 * An ETIMEDOUT error means we retry (if our counter isn't 1988 * exhausted) 1989 */ 1990 if (result == ETIMEDOUT) { 1991 if (ddi_get_lbolt() < pptr->config_stop) { 1992 PHY_CHANGED(pwp, pptr); 1993 RESTART_DISCOVERY(pwp); 1994 } else { 1995 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 1996 "%s: Retries exhausted for %s, killing", 1997 __func__, pptr->path); 1998 pptr->config_stop = 0; 1999 pmcs_kill_changed(pwp, pptr, 0); 2000 } 2001 return (-1); 2002 } 2003 /* 2004 * Other errors or no valid device id is fatal, but don't 2005 * preclude a future action. 
2006 */ 2007 if (result || pptr->valid_device_id == 0) { 2008 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 2009 "%s: %s could not be registered", __func__, 2010 pptr->path); 2011 return (-1); 2012 } 2013 } 2014 return (0); 2015 } 2016 2017 int 2018 pmcs_iport_tgtmap_create(pmcs_iport_t *iport) 2019 { 2020 ASSERT(iport); 2021 if (iport == NULL) 2022 return (B_FALSE); 2023 2024 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__); 2025 2026 /* create target map */ 2027 if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET, 2028 tgtmap_csync_usec, tgtmap_stable_usec, (void *)iport, 2029 pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb, 2030 &iport->iss_tgtmap) != DDI_SUCCESS) { 2031 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL, 2032 "%s: failed to create tgtmap", __func__); 2033 return (B_FALSE); 2034 } 2035 return (B_TRUE); 2036 } 2037 2038 int 2039 pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport) 2040 { 2041 ASSERT(iport && iport->iss_tgtmap); 2042 if ((iport == NULL) || (iport->iss_tgtmap == NULL)) 2043 return (B_FALSE); 2044 2045 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__); 2046 2047 /* destroy target map */ 2048 scsi_hba_tgtmap_destroy(iport->iss_tgtmap); 2049 return (B_TRUE); 2050 } 2051 2052 /* 2053 * Remove all phys from an iport's phymap and empty it's phylist. 2054 * Called when a port has been reset by the host (see pmcs_intr.c) 2055 * or prior to issuing a soft reset if we detect a stall on the chip 2056 * (see pmcs_attach.c). 2057 */ 2058 void 2059 pmcs_iport_teardown_phys(pmcs_iport_t *iport) 2060 { 2061 pmcs_hw_t *pwp; 2062 sas_phymap_phys_t *phys; 2063 int phynum; 2064 2065 ASSERT(iport); 2066 ASSERT(mutex_owned(&iport->lock)); 2067 pwp = iport->pwp; 2068 ASSERT(pwp); 2069 2070 /* 2071 * Remove all phys from the iport handle's phy list, unset its 2072 * primary phy and update its state. 
2073 */ 2074 pmcs_remove_phy_from_iport(iport, NULL); 2075 iport->pptr = NULL; 2076 iport->ua_state = UA_PEND_DEACTIVATE; 2077 2078 /* Remove all phys from the phymap */ 2079 phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua); 2080 if (phys) { 2081 while ((phynum = sas_phymap_phys_next(phys)) != -1) { 2082 (void) sas_phymap_phy_rem(pwp->hss_phymap, phynum); 2083 } 2084 sas_phymap_phys_free(phys); 2085 } 2086 } 2087 2088 /* 2089 * Query the phymap and populate the iport handle passed in. 2090 * Called with iport lock held. 2091 */ 2092 int 2093 pmcs_iport_configure_phys(pmcs_iport_t *iport) 2094 { 2095 pmcs_hw_t *pwp; 2096 pmcs_phy_t *pptr; 2097 sas_phymap_phys_t *phys; 2098 int phynum; 2099 int inst; 2100 2101 ASSERT(iport); 2102 ASSERT(mutex_owned(&iport->lock)); 2103 pwp = iport->pwp; 2104 ASSERT(pwp); 2105 inst = ddi_get_instance(iport->dip); 2106 2107 mutex_enter(&pwp->lock); 2108 ASSERT(pwp->root_phys != NULL); 2109 2110 /* 2111 * Query the phymap regarding the phys in this iport and populate 2112 * the iport's phys list. Hereafter this list is maintained via 2113 * port up and down events in pmcs_intr.c 2114 */ 2115 ASSERT(list_is_empty(&iport->phys)); 2116 phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua); 2117 ASSERT(phys != NULL); 2118 while ((phynum = sas_phymap_phys_next(phys)) != -1) { 2119 /* Grab the phy pointer from root_phys */ 2120 pptr = pwp->root_phys + phynum; 2121 ASSERT(pptr); 2122 pmcs_lock_phy(pptr); 2123 ASSERT(pptr->phynum == phynum); 2124 2125 /* 2126 * Set a back pointer in the phy to this iport. 2127 */ 2128 pptr->iport = iport; 2129 2130 /* 2131 * If this phy is the primary, set a pointer to it on our 2132 * iport handle, and set our portid from it. 
2133 */ 2134 if (!pptr->subsidiary) { 2135 iport->pptr = pptr; 2136 iport->portid = pptr->portid; 2137 } 2138 2139 /* 2140 * Finally, insert the phy into our list 2141 */ 2142 pmcs_unlock_phy(pptr); 2143 pmcs_add_phy_to_iport(iport, pptr); 2144 2145 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found " 2146 "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum, 2147 (void *)pptr, inst, iport->refcnt); 2148 } 2149 mutex_exit(&pwp->lock); 2150 sas_phymap_phys_free(phys); 2151 RESTART_DISCOVERY(pwp); 2152 return (DDI_SUCCESS); 2153 } 2154 2155 /* 2156 * Return the iport that ua is associated with, or NULL. If an iport is 2157 * returned, it will be held and the caller must release the hold. 2158 */ 2159 static pmcs_iport_t * 2160 pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua) 2161 { 2162 pmcs_iport_t *iport = NULL; 2163 2164 rw_enter(&pwp->iports_lock, RW_READER); 2165 for (iport = list_head(&pwp->iports); 2166 iport != NULL; 2167 iport = list_next(&pwp->iports, iport)) { 2168 mutex_enter(&iport->lock); 2169 if (strcmp(iport->ua, ua) == 0) { 2170 mutex_exit(&iport->lock); 2171 pmcs_hold_iport(iport); 2172 break; 2173 } 2174 mutex_exit(&iport->lock); 2175 } 2176 rw_exit(&pwp->iports_lock); 2177 2178 return (iport); 2179 } 2180 2181 /* 2182 * Return the iport that pptr is associated with, or NULL. 2183 * If an iport is returned, there is a hold that the caller must release. 
 */
pmcs_iport_t *
pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
	pmcs_iport_t *iport = NULL;
	char *ua;

	/* Map the WWN to a unit-address string, then reuse the ua lookup */
	ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
	if (ua) {
		iport = pmcs_get_iport_by_ua(pwp, ua);
		if (iport) {
			mutex_enter(&iport->lock);
			pmcs_iport_active(iport);
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
			    "found iport [0x%p] on ua (%s), refcnt (%d)",
			    __func__, (void *)iport, ua, iport->refcnt);
			mutex_exit(&iport->lock);
		}
	}

	return (iport);
}

/*
 * Promote the next phy on this port to primary, and return it.
 * Called when the primary PHY on a port is going down, but the port
 * remains up (see pmcs_intr.c).
 */
pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
{
	pmcs_hw_t *pwp;
	pmcs_iport_t *iport;
	pmcs_phy_t *pptr, *child;
	int portid;

	pmcs_lock_phy(prev_primary);
	portid = prev_primary->portid;
	iport = prev_primary->iport;
	pwp = prev_primary->pwp;

	/* Use the first available phy in this port */
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		if ((pptr->portid == portid) && (pptr != prev_primary)) {
			/*
			 * Root PHY: taking just its mutex (rather than
			 * pmcs_lock_phy, which would lock descendents too)
			 * is sufficient here.
			 */
			mutex_enter(&pptr->phy_lock);
			break;
		}
	}

	/* No other PHY shares this portid; nothing to promote */
	if (pptr == NULL) {
		pmcs_unlock_phy(prev_primary);
		return (NULL);
	}

	/* Point the iport at its new primary PHY */
	if (iport) {
		mutex_enter(&iport->lock);
		iport->pptr = pptr;
		mutex_exit(&iport->lock);
	}

	/* Update the phy handle with the data from the previous primary */
	pptr->children = prev_primary->children;
	child = pptr->children;
	/* Re-parent every inherited child onto the new primary */
	while (child) {
		child->parent = pptr;
		child = child->sibling;
	}
	pptr->ncphy = prev_primary->ncphy;
	pptr->width = prev_primary->width;
	pptr->dtype = prev_primary->dtype;
	pptr->pend_dtype = prev_primary->pend_dtype;
	pptr->tolerates_sas2 = prev_primary->tolerates_sas2;
	pptr->atdt = prev_primary->atdt;
	pptr->portid = prev_primary->portid;
	pptr->link_rate = prev_primary->link_rate;
	pptr->configured = prev_primary->configured;
	pptr->iport = prev_primary->iport;
	pptr->target = prev_primary->target;
	if (pptr->target) {
		/* The target's back-pointer must follow the promotion */
		pptr->target->phy = pptr;
	}

	/* Update the phy mask properties for the affected PHYs */
	/* Clear the current values... */
	pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
	    pptr->tgt_port_pm_tmp, B_FALSE);
	/* ...replace with the values from prev_primary... */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_TRUE);
	/* ...then clear prev_primary's PHY values from the new primary */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm,
	    prev_primary->tgt_port_pm, B_FALSE);
	/* Clear the prev_primary's values */
	pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_FALSE);

	pptr->subsidiary = 0;

	/* The old primary is now a childless, targetless subsidiary */
	prev_primary->subsidiary = 1;
	prev_primary->children = NULL;
	prev_primary->target = NULL;
	pptr->device_id = prev_primary->device_id;
	pptr->valid_device_id = prev_primary->valid_device_id;
	pmcs_unlock_phy(prev_primary);

	/*
	 * We call pmcs_unlock_phy() on pptr because it now contains the
	 * list of children.
	 */
	pmcs_unlock_phy(pptr);

	return (pptr);
}

void
pmcs_hold_iport(pmcs_iport_t *iport)
{
	/*
	 * Grab a reference to this iport.
	 */
	ASSERT(iport);
	mutex_enter(&iport->refcnt_lock);
	iport->refcnt++;
	mutex_exit(&iport->refcnt_lock);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: iport "
	    "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt);
}

void
pmcs_rele_iport(pmcs_iport_t *iport)
{
	/*
	 * Release a refcnt on this iport. If this is the last reference,
	 * signal the potential waiter in pmcs_iport_unattach().
	 */
	ASSERT(iport->refcnt > 0);
	mutex_enter(&iport->refcnt_lock);
	iport->refcnt--;
	mutex_exit(&iport->refcnt_lock);
	/*
	 * NOTE(review): refcnt is re-read here after refcnt_lock has been
	 * dropped, so a racing pmcs_hold_iport() could make this check see
	 * a non-zero count and skip the signal (or see zero spuriously).
	 * Confirm that the waiter re-checks the count under the lock in a
	 * cv_wait loop, otherwise the signal should be issued while still
	 * holding refcnt_lock.
	 */
	if (iport->refcnt == 0) {
		cv_signal(&iport->refcnt_cv);
	}
	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: iport "
	    "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt);
}

/*
 * sas_phymap activation callback: a unit address has come into existence.
 * Registers the ua with the iportmap and stores the HBA softstate as the
 * phymap private data for this ua.
 */
void
pmcs_phymap_activate(void *arg, char *ua, void **privp)
{
	_NOTE(ARGUNUSED(privp));
	pmcs_hw_t *pwp = arg;
	pmcs_iport_t *iport = NULL;

	mutex_enter(&pwp->lock);
	/*
	 * NOTE(review): on these early returns *privp is never set --
	 * presumably the framework tolerates an unset private pointer for
	 * a ua activated during teardown/reset; confirm.
	 */
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD) ||
	    (pwp->state == STATE_IN_RESET)) {
		mutex_exit(&pwp->lock);
		return;
	}
	pwp->phymap_active++;
	mutex_exit(&pwp->lock);

	if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
		    "add iport handle on unit address [%s]", __func__, ua);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
		    "phymap_active count (%d), added iport handle on unit "
		    "address [%s]", __func__, pwp->phymap_active, ua);
	}

	/* Set the HBA softstate as our private data for this unit address */
	*privp = (void *)pwp;

	/*
	 * We are waiting on attach for this iport node, unless it is still
	 * attached. This can happen if a consumer has an outstanding open
	 * on our iport node, but the port is down.
If this is the case, we
	 * need to configure our iport here for reuse.
	 */
	iport = pmcs_get_iport_by_ua(pwp, ua);
	if (iport) {
		mutex_enter(&iport->lock);
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
			    "failed to configure phys on iport [0x%p] at "
			    "unit address (%s)", __func__, (void *)iport, ua);
		}
		pmcs_iport_active(iport);
		pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
		    &iport->nphy);
		mutex_exit(&iport->lock);
		/* Drop the hold taken by pmcs_get_iport_by_ua() */
		pmcs_rele_iport(iport);
	}

}

/*
 * sas_phymap deactivation callback: the unit address has gone away.
 * Removes the ua from the iportmap and, if an iport still exists for it,
 * marks that iport inactive and detaches its PHYs.
 */
void
pmcs_phymap_deactivate(void *arg, char *ua, void *privp)
{
	_NOTE(ARGUNUSED(privp));
	pmcs_hw_t *pwp = arg;
	pmcs_iport_t *iport;

	mutex_enter(&pwp->lock);
	pwp->phymap_active--;
	mutex_exit(&pwp->lock);

	if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
		    "remove iport handle on unit address [%s]", __func__, ua);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
		    "phymap_active count (%d), removed iport handle on unit "
		    "address [%s]", __func__, pwp->phymap_active, ua);
	}

	iport = pmcs_get_iport_by_ua(pwp, ua);

	if (iport == NULL) {
		/* No iport was ever attached for this ua; nothing to do */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed "
		    "lookup of iport handle on unit addr (%s)", __func__, ua);
		return;
	}

	mutex_enter(&iport->lock);
	iport->ua_state = UA_INACTIVE;
	iport->portid = PMCS_IPORT_INVALID_PORT_ID;
	/* NULL phy argument: remove all PHYs from this iport */
	pmcs_remove_phy_from_iport(iport, NULL);
	mutex_exit(&iport->lock);
	pmcs_rele_iport(iport);
}

/*
 * Top-level discovery function
 */
void
pmcs_discover(pmcs_hw_t *pwp)
{
	pmcs_phy_t *pptr;
	pmcs_phy_t *root_phy;

	DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);

	mutex_enter(&pwp->lock);

	/* Discovery only runs while the HBA is fully operational */
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return;
	}

	/* Ensure we have at least one phymap active */
	if (pwp->phymap_active == 0) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: phymap inactive, exiting", __func__);
		return;
	}

	mutex_exit(&pwp->lock);

	/*
	 * If no iports have attached, but we have PHYs that are up, we
	 * are waiting for iport attach to complete. Restart discovery.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	if (!pwp->iports_attached) {
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: no iports attached, retry discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}
	rw_exit(&pwp->iports_lock);

	/* Only one discovery/configuration pass may run at a time */
	mutex_enter(&pwp->config_lock);
	if (pwp->configuring) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: configuration already in progress", __func__);
		return;
	}

	/* Non-blocking scratch acquisition; reschedule if unavailable */
	if (pmcs_acquire_scratch(pwp, B_FALSE)) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: cannot allocate scratch", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}

	pwp->configuring = 1;
	pwp->config_changed = B_FALSE;
	mutex_exit(&pwp->config_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin");

	/*
	 * First, tell SCSA that we're beginning set operations.
	 */
	pmcs_begin_observations(pwp);

	/*
	 * The order of the following traversals is important.
	 *
	 * The first one checks for changed expanders.
	 *
	 * The second one aborts commands for dead devices and deregisters them.
	 *
	 * The third one clears the contents of dead expanders from the tree
	 *
	 * The fourth one clears now dead devices in expanders that remain.
	 */

	/*
	 * 1. Check expanders marked changed (but not dead) to see if they still
	 * have the same number of phys and the same SAS address. Mark them,
	 * their subsidiary phys (if wide) and their descendents dead if
	 * anything has changed. Check the devices they contain to see if
	 * *they* have changed. If they've changed from type NOTHING we leave
	 * them marked changed to be configured later (picking up a new SAS
	 * address and link rate if possible). Otherwise, any change in type,
	 * SAS address or removal of target role will cause us to mark them
	 * (and their descendents) as dead (and cause any pending commands
	 * and associated devices to be removed).
	 *
	 * NOTE: We don't want to bail on discovery if the config has
	 * changed until *after* we run pmcs_kill_devices.
	 */
	root_phy = pwp->root_phys;
	pmcs_check_expanders(pwp, root_phy);

	/*
	 * 2. Descend the tree looking for dead devices and kill them
	 * by aborting all active commands and then deregistering them.
	 */
	if (pmcs_kill_devices(pwp, root_phy)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: pmcs_kill_devices failed!", __func__);
	}

	/*
	 * 3. Check for dead expanders and remove their children from the tree.
	 * By the time we get here, the devices and commands for them have
	 * already been terminated and removed.
	 *
	 * We do this independent of the configuration count changing so we can
	 * free any dead device PHYs that were discovered while checking
	 * expanders. We ignore any subsidiary phys as pmcs_clear_expander
	 * will take care of those.
	 *
	 * NOTE: pmcs_clear_expander requires softstate lock
	 */
	mutex_enter(&pwp->lock);
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		/*
		 * Call pmcs_clear_expander for every root PHY. It will
		 * recurse and determine which (if any) expanders actually
		 * need to be cleared.
		 */
		pmcs_lock_phy(pptr);
		pmcs_clear_expander(pwp, pptr, 0);
		pmcs_unlock_phy(pptr);
	}
	mutex_exit(&pwp->lock);

	/*
	 * 4. Check for dead devices and nullify them. By the time we get here,
	 * the devices and commands for them have already been terminated
	 * and removed. This is different from step 2 in that this just nulls
	 * phys that are part of expanders that are still here but used to
	 * be something but are no longer something (e.g., after a pulled
	 * disk drive). Note that dead expanders had their contained phys
	 * removed from the tree- here, the expanders themselves are
	 * nullified (unless they were removed by being contained in another
	 * expander phy).
	 */
	pmcs_clear_phys(pwp, root_phy);

	/*
	 * 5. Now check for and configure new devices.
	 */
	if (pmcs_configure_new_devices(pwp, root_phy)) {
		goto restart;
	}

	/* Reached by normal fall-through after a successful pass */
out:
	DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end");

	mutex_enter(&pwp->config_lock);

	if (pwp->config_changed == B_FALSE) {
		/*
		 * Observation is stable, report what we currently see to
		 * the tgtmaps for delta processing. Start by setting
		 * BEGIN on all tgtmaps.
		 */
		mutex_exit(&pwp->config_lock);
		if (pmcs_report_observations(pwp) == B_FALSE) {
			goto restart;
		}
		mutex_enter(&pwp->config_lock);
	} else {
		/*
		 * If config_changed is TRUE, we need to reschedule
		 * discovery now.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Config has changed, will re-run discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}

	pmcs_release_scratch(pwp);
	if (!pwp->quiesced) {
		pwp->blocked = 0;
	}
	/* Allow the next discovery pass in and wake any waiter */
	pwp->configuring = 0;
	cv_signal(&pwp->config_cv);
	mutex_exit(&pwp->config_lock);

#ifdef DEBUG
	/*
	 * Sanity check: if a PHY still needs work, some work item should
	 * already be scheduled; complain if not.
	 */
	pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys);
	if (pptr != NULL) {
		if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "PHY %s dead=%d changed=%d configured=%d "
			    "but no work scheduled", pptr->path, pptr->dead,
			    pptr->changed, pptr->configured);
		}
		pmcs_unlock_phy(pptr);
	}
#endif

	return;

restart:
	/* Clean up and restart discovery */
	pmcs_release_scratch(pwp);
	pmcs_flush_observations(pwp);
	mutex_enter(&pwp->config_lock);
	pwp->configuring = 0;
	cv_signal(&pwp->config_cv);
	RESTART_DISCOVERY_LOCKED(pwp);
	mutex_exit(&pwp->config_lock);
}

/*
 * Return any PHY that needs to have scheduled work done. The PHY is returned
 * locked.
 */
static pmcs_phy_t *
pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *cphyp, *pnext;

	while (pptr) {
		pmcs_lock_phy(pptr);

		/* Changed, or dead-but-still-registered, PHYs need work */
		if (pptr->changed || (pptr->dead && pptr->valid_device_id)) {
			return (pptr);
		}

		pnext = pptr->sibling;

		if (pptr->children) {
			cphyp = pptr->children;
			/* Drop our lock before recursing into the subtree */
			pmcs_unlock_phy(pptr);
			cphyp = pmcs_find_phy_needing_work(pwp, cphyp);
			if (cphyp) {
				return (cphyp);
			}
		} else {
			pmcs_unlock_phy(pptr);
		}

		pptr = pnext;
	}

	return (NULL);
}

/*
 * We may (or may not) report observations to SCSA. This is prefaced by
 * issuing a set_begin for each iport target map.
 */
static void
pmcs_begin_observations(pmcs_hw_t *pwp)
{
	pmcs_iport_t *iport;
	scsi_hba_tgtmap_t *tgtmap;

	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		/*
		 * Unless we have at least one phy up, skip this iport.
		 * Note we don't need to lock the iport for report_skip
		 * since it is only used here. We are doing the skip so that
		 * the phymap and iportmap stabilization times are honored -
		 * giving us the ability to recover port operation within the
		 * stabilization time without unconfiguring targets using the
		 * port.
		 */
		if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
			iport->report_skip = 1;
			continue;		/* skip set_begin */
		}
		iport->report_skip = 0;

		tgtmap = iport->iss_tgtmap;
		ASSERT(tgtmap);
		if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) {
			/* Bail out of the whole pass on a set_begin failure */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: cannot set_begin tgtmap ", __func__);
			rw_exit(&pwp->iports_lock);
			return;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
		    "%s: set begin on tgtmap [0x%p]", __func__, (void *)tgtmap);
	}
	rw_exit(&pwp->iports_lock);
}

/*
 * Tell SCSA to flush the observations we've already sent (if any), as they
 * are no longer valid.
 */
static void
pmcs_flush_observations(pmcs_hw_t *pwp)
{
	pmcs_iport_t *iport;
	scsi_hba_tgtmap_t *tgtmap;

	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		/*
		 * Skip this iport if it has no PHYs up.
		 */
		if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
			continue;
		}

		tgtmap = iport->iss_tgtmap;
		ASSERT(tgtmap);
		if (scsi_hba_tgtmap_set_flush(tgtmap) != DDI_SUCCESS) {
			/* Log and keep going; flush the remaining tgtmaps */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: Failed set_flush on tgtmap 0x%p", __func__,
			    (void *)tgtmap);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: set flush on tgtmap 0x%p", __func__,
			    (void *)tgtmap);
		}
	}
	rw_exit(&pwp->iports_lock);
}

/*
 * Report current observations to SCSA.
 *
 * Returns B_TRUE on success; B_FALSE if the configuration changed under us
 * (or a tgtmap call failed), in which case the caller restarts discovery.
 */
static boolean_t
pmcs_report_observations(pmcs_hw_t *pwp)
{
	pmcs_iport_t *iport;
	scsi_hba_tgtmap_t *tgtmap;
	char *ap;
	pmcs_phy_t *pptr;
	uint64_t wwn;

	/*
	 * Observation is stable, report what we currently see to the tgtmaps
	 * for delta processing.
	 */
	pptr = pwp->root_phys;

	while (pptr) {
		pmcs_lock_phy(pptr);

		/*
		 * Skip PHYs that have nothing attached or are dead.
		 */
		if ((pptr->dtype == NOTHING) || pptr->dead) {
			pmcs_unlock_phy(pptr);
			pptr = pptr->sibling;
			continue;
		}

		/* A change mid-report invalidates the whole observation */
		if (pptr->changed) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: oops, PHY %s changed; restart discovery",
			    __func__, pptr->path);
			pmcs_unlock_phy(pptr);
			return (B_FALSE);
		}

		/*
		 * Get the iport for this root PHY, then call the helper
		 * to report observations for this iport's targets
		 */
		wwn = pmcs_barray2wwn(pptr->sas_address);
		/* Drop the PHY lock across the iport lookup (lock order) */
		pmcs_unlock_phy(pptr);
		iport = pmcs_get_iport_by_wwn(pwp, wwn);
		if (iport == NULL) {
			/* No iport for this tgt */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: no iport for this target", __func__);
			pptr = pptr->sibling;
			continue;
		}

		pmcs_lock_phy(pptr);
		if (!iport->report_skip) {
			if (pmcs_report_iport_observations(
			    pwp, iport, pptr) == B_FALSE) {
				pmcs_rele_iport(iport);
				pmcs_unlock_phy(pptr);
				return (B_FALSE);
			}
		}
		pmcs_rele_iport(iport);
		pmcs_unlock_phy(pptr);
		pptr = pptr->sibling;
	}

	/*
	 * The observation is complete, end sets. Note we will skip any
	 * iports that are active, but have no PHYs in them (i.e. awaiting
	 * unconfigure). Set to restart discovery if we find this.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports);
	    iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {

		if (iport->report_skip)
			continue;		/* skip set_end */

		tgtmap = iport->iss_tgtmap;
		ASSERT(tgtmap);
		if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: cannot set_end tgtmap ", __func__);
			rw_exit(&pwp->iports_lock);
			return (B_FALSE);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
		    "%s: set end on tgtmap [0x%p]", __func__, (void *)tgtmap);
	}

	/*
	 * Now that discovery is complete, set up the necessary
	 * DDI properties on each iport node.
	 */
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		/* Set up the 'attached-port' property on the iport */
		ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
		mutex_enter(&iport->lock);
		pptr = iport->pptr;
		mutex_exit(&iport->lock);
		if (pptr == NULL) {
			/*
			 * This iport is down, but has not been
			 * removed from our list (unconfigured).
			 * Set our value to '0'.
			 *
			 * NOTE(review): with a size argument of 1, snprintf
			 * stores only the terminating NUL, so ap remains the
			 * empty string (from kmem_zalloc) rather than "0" --
			 * confirm whether an empty property value is intended
			 * here.
			 */
			(void) snprintf(ap, 1, "%s", "0");
		} else {
			/* Otherwise, set it to remote phy's wwn */
			pmcs_lock_phy(pptr);
			wwn = pmcs_barray2wwn(pptr->sas_address);
			(void) scsi_wwn_to_wwnstr(wwn, 1, ap);
			pmcs_unlock_phy(pptr);
		}
		if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, ap) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed "
			    "to set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")",
			    __func__);
		}
		kmem_free(ap, PMCS_MAX_UA_SIZE);
	}
	rw_exit(&pwp->iports_lock);

	return (B_TRUE);
}

/*
 * Report observations into a particular iport's target map
 *
 * Called with phyp (and all descendents) locked
 */
static boolean_t
pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport,
    pmcs_phy_t *phyp)
{
	pmcs_phy_t *lphyp;
	scsi_hba_tgtmap_t *tgtmap;
	scsi_tgtmap_tgt_type_t tgt_type;
	char *ua;
	uint64_t wwn;

	tgtmap = iport->iss_tgtmap;
	ASSERT(tgtmap);

	lphyp = phyp;
	while (lphyp) {
		switch (lphyp->dtype) {
		default:		/* Skip unknown PHYs.
					 */
			/* for non-root phys, skip to sibling */
			goto next_phy;

		case SATA:
		case SAS:
			tgt_type = SCSI_TGT_SCSI_DEVICE;
			break;

		case EXPANDER:
			tgt_type = SCSI_TGT_SMP_DEVICE;
			break;
		}

		/* Only report PHYs that are alive and fully configured */
		if (lphyp->dead || !lphyp->configured) {
			goto next_phy;
		}

		/*
		 * Validate the PHY's SAS address
		 */
		if (((lphyp->sas_address[0] & 0xf0) >> 4) != NAA_IEEE_REG) {
			pmcs_prt(pwp, PMCS_PRT_ERR, lphyp, NULL,
			    "PHY 0x%p (%s) has invalid SAS address; "
			    "will not enumerate", (void *)lphyp, lphyp->path);
			goto next_phy;
		}

		wwn = pmcs_barray2wwn(lphyp->sas_address);
		/* NULL buffer: scsi_wwn_to_wwnstr allocates; free it below */
		ua = scsi_wwn_to_wwnstr(wwn, 1, NULL);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, lphyp, NULL,
		    "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
		    ua, (void *)tgtmap, (void*)lphyp);

		if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) !=
		    DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
			    "%s: failed to add address %s", __func__, ua);
			scsi_free_wwnstr(ua);
			return (B_FALSE);
		}
		scsi_free_wwnstr(ua);

		/* Recurse into this PHY's subtree (e.g. expander children) */
		if (lphyp->children) {
			if (pmcs_report_iport_observations(pwp, iport,
			    lphyp->children) == B_FALSE) {
				return (B_FALSE);
			}
		}

		/* for non-root phys, report siblings too */
next_phy:
		if (IS_ROOT_PHY(lphyp)) {
			lphyp = NULL;
		} else {
			lphyp = lphyp->sibling;
		}
	}

	return (B_TRUE);
}

/*
 * Check for and configure new devices.
 *
 * If the changed device is a SATA device, add a SATA device.
 *
 * If the changed device is a SAS device, add a SAS device.
 *
 * If the changed device is an EXPANDER device, do a REPORT
 * GENERAL SMP command to find out the number of contained phys.
 *
 * For each number of contained phys, allocate a phy, do a
 * DISCOVERY SMP command to find out what kind of device it
 * is and add it to the linked list of phys on the *next* level.
 *
 * NOTE: pptr passed in by the caller will be a root PHY
 */
static int
pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int rval = 0;
	pmcs_iport_t *iport;
	pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild;
	uint64_t wwn;

	/*
	 * First, walk through each PHY at this level
	 */
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;

		/*
		 * Set the new dtype if it has changed
		 */
		if ((pptr->pend_dtype != NEW) &&
		    (pptr->pend_dtype != pptr->dtype)) {
			pptr->dtype = pptr->pend_dtype;
		}

		/* Only unchanged-pending, live, unconfigured PHYs proceed */
		if (pptr->changed == 0 || pptr->dead || pptr->configured) {
			goto next_phy;
		}

		/* Confirm that this iport is configured */
		root_phy = pmcs_get_root_phy(pptr);
		wwn = pmcs_barray2wwn(root_phy->sas_address);
		/* Drop the PHY lock across the iport lookup (lock order) */
		pmcs_unlock_phy(pptr);
		iport = pmcs_get_iport_by_wwn(pwp, wwn);
		if (iport == NULL) {
			/* No iport yet: stop this level and signal a retry */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: iport not yet configured, "
			    "retry discovery", __func__);
			pnext = NULL;
			rval = -1;
			pmcs_lock_phy(pptr);
			goto next_phy;
		}

		pmcs_lock_phy(pptr);
		switch (pptr->dtype) {
		case NOTHING:
			pptr->changed = 0;
			break;
		case SATA:
		case SAS:
			pptr->iport = iport;
			pmcs_new_tport(pwp, pptr);
			break;
		case EXPANDER:
			pmcs_configure_expander(pwp, pptr, iport);
			break;
		}
		pmcs_rele_iport(iport);

		/* If the topology changed under us, abandon this level */
		mutex_enter(&pwp->config_lock);
		if (pwp->config_changed) {
			mutex_exit(&pwp->config_lock);
			pnext = NULL;
			goto next_phy;
		}
		mutex_exit(&pwp->config_lock);

next_phy:
		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	if (rval != 0) {
		return (rval);
	}

	/*
	 * Now walk through each PHY again, recalling ourselves if they
	 * have children
	 */
	pptr = orig_pptr;
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;
		pchild = pptr->children;
		/* Unlock before recursing; the child level locks itself */
		pmcs_unlock_phy(pptr);

		if (pchild) {
			rval = pmcs_configure_new_devices(pwp, pchild);
			if (rval != 0) {
				break;
			}
		}

		pptr = pnext;
	}

	return (rval);
}

/*
 * Set all phys and descendent phys as changed if changed == B_TRUE, otherwise
 * mark them all as not changed.
 *
 * Called with parent PHY locked.
 */
void
pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed,
    int level)
{
	pmcs_phy_t *pptr;

	if (level == 0) {
		/* Level 0: touch only the parent itself, not its siblings */
		if (changed) {
			PHY_CHANGED(pwp, parent);
		} else {
			parent->changed = 0;
		}
		/* Non-root expanders restart wide-port accounting at 1 */
		if (parent->dtype == EXPANDER && parent->level) {
			parent->width = 1;
		}
		if (parent->children) {
			pmcs_set_changed(pwp, parent->children, changed,
			    level + 1);
		}
	} else {
		/* Deeper levels: walk the full sibling chain */
		pptr = parent;
		while (pptr) {
			if (changed) {
				PHY_CHANGED(pwp, pptr);
			} else {
				pptr->changed = 0;
			}
			if (pptr->dtype == EXPANDER && pptr->level) {
				pptr->width = 1;
			}
			if (pptr->children) {
				pmcs_set_changed(pwp, pptr->children, changed,
				    level + 1);
			}
			pptr = pptr->sibling;
		}
	}
}

/*
 * Take the passed phy mark it and its descendants as dead.
 * Fire up reconfiguration to abort commands and bury it.
 *
 * Called with the parent PHY locked.
 */
void
pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level)
{
	pmcs_phy_t *pptr = parent;

	while (pptr) {
		/* Mark for abort processing and queue the abort handler */
		pptr->link_rate = 0;
		pptr->abort_sent = 0;
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		pptr->need_rl_ext = 0;

		/* Newly dead PHYs need another discovery pass to clean up */
		if (pptr->dead == 0) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		}

		pptr->dead = 1;

		if (pptr->children) {
			pmcs_kill_changed(pwp, pptr->children, level + 1);
		}

		/*
		 * Only kill siblings at level > 0
		 */
		if (level == 0) {
			return;
		}

		pptr = pptr->sibling;
	}
}

/*
 * Go through every PHY and clear any that are dead (unless they're expanders)
 */
static void
pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *pnext, *phyp;

	phyp = pptr;
	while (phyp) {
		/*
		 * Only root PHYs are locked here; locking a root PHY also
		 * covers its whole subtree, so the recursion below runs
		 * with the tree already locked.
		 */
		if (IS_ROOT_PHY(phyp)) {
			pmcs_lock_phy(phyp);
		}

		if ((phyp->dtype != EXPANDER) && phyp->dead) {
			pmcs_clear_phy(pwp, phyp);
		}

		if (phyp->children) {
			pmcs_clear_phys(pwp, phyp->children);
		}

		pnext = phyp->sibling;

		if (IS_ROOT_PHY(phyp)) {
			pmcs_unlock_phy(phyp);
		}

		phyp = pnext;
	}
}

/*
 * Clear volatile parts of a phy. Called with PHY locked.
 */
void
pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: %s",
	    __func__, pptr->path);
	ASSERT(mutex_owned(&pptr->phy_lock));
	/* keep sibling */
	/* keep children */
	/* keep parent */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	/* keep hw_event_ack */
	pptr->ncphy = 0;
	/* keep phynum */
	pptr->width = 0;
	/* Reset device-state recovery bookkeeping */
	pptr->ds_recovery_retries = 0;
	pptr->ds_prev_good_recoveries = 0;
	pptr->last_good_recovery = 0;
	pptr->prev_recovery = 0;

	/* keep dtype */
	pptr->config_stop = 0;
	pptr->spinup_hold = 0;
	pptr->atdt = 0;
	/* keep portid */
	pptr->link_rate = 0;
	pptr->valid_device_id = 0;
	pptr->abort_sent = 0;
	pptr->abort_pending = 0;
	pptr->need_rl_ext = 0;
	pptr->subsidiary = 0;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->reenumerate = 0;
	/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
	/* XXX: What about directly attached disks? */
	if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
		pptr->dead = 1;
	pptr->changed = 0;
	/* keep SAS address */
	/* keep path */
	/* keep ref_count */
	/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
	if (!IS_ROOT_PHY(pptr)) {
		/* Remember the previous iport for later diagnostics/reuse */
		pptr->last_iport = pptr->iport;
		pptr->iport = NULL;
	}
	/* keep target */
}

/*
 * Allocate softstate for this target if there isn't already one. If there
 * is, just redo our internal configuration. If it is actually "new", we'll
 * soon get a tran_tgt_init for it.
 *
 * Called with PHY locked.
 */
static void
pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: phy 0x%p @ %s",
	    __func__, (void *)pptr, pptr->path);

	if (pmcs_configure_phy(pwp, pptr) == B_FALSE) {
		/*
		 * If the config failed, mark the PHY as changed.
		 */
		PHY_CHANGED(pwp, pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: pmcs_configure_phy failed for phy 0x%p", __func__,
		    (void *)pptr);
		return;
	}

	/* Mark PHY as no longer changed */
	pptr->changed = 0;

	/*
	 * If the PHY has no target pointer:
	 *
	 * If it's a root PHY, see if another PHY in the iport holds the
	 * target pointer (primary PHY changed). If so, move it over.
	 *
	 * If it's not a root PHY, see if there's a PHY on the dead_phys
	 * list that matches.
	 */
	if (pptr->target == NULL) {
		if (IS_ROOT_PHY(pptr)) {
			pmcs_phy_t *rphy = pwp->root_phys;

			while (rphy) {
				if (rphy == pptr) {
					rphy = rphy->sibling;
					continue;
				}

				mutex_enter(&rphy->phy_lock);
				/*
				 * Same iport and a target attached: this is
				 * the old primary; steal its target.
				 */
				if ((rphy->iport == pptr->iport) &&
				    (rphy->target != NULL)) {
					mutex_enter(&rphy->target->statlock);
					pptr->target = rphy->target;
					rphy->target = NULL;
					pptr->target->phy = pptr;
					/* The target is now on pptr */
					mutex_exit(&pptr->target->statlock);
					mutex_exit(&rphy->phy_lock);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    pptr, pptr->target,
					    "%s: Moved target from %s to %s",
					    __func__, rphy->path, pptr->path);
					break;
				}
				mutex_exit(&rphy->phy_lock);

				rphy = rphy->sibling;
			}
		} else {
			/* Non-root: try to reclaim a matching dead PHY */
			pmcs_reap_dead_phy(pptr);
		}
	}

	/*
	 * Only assign the device if there is a target for this PHY with a
	 * matching SAS address. If an iport is disconnected from one piece
	 * of storage and connected to another within the iport stabilization
	 * time, we can get the PHY/target mismatch situation.
	 *
	 * Otherwise, it'll get done in tran_tgt_init.
	 */
	if (pptr->target) {
		mutex_enter(&pptr->target->statlock);
		if (pmcs_phy_target_match(pptr) == B_FALSE) {
			mutex_exit(&pptr->target->statlock);
			if (!IS_ROOT_PHY(pptr)) {
				pmcs_dec_phy_ref_count(pptr);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Not assigning existing tgt %p for PHY %p "
			    "(WWN mismatch)", __func__, (void *)pptr->target,
			    (void *)pptr);
			pptr->target = NULL;
			return;
		}

		if (!pmcs_assign_device(pwp, pptr->target)) {
			/* Best-effort: log the failure and carry on */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: pmcs_assign_device failed for target 0x%p",
			    __func__, (void *)pptr->target);
		}
		mutex_exit(&pptr->target->statlock);
	}
}

/*
 * Register the PHY with the hardware and mark it configured.
 * Returns B_TRUE on success, B_FALSE if no device handle could be obtained.
 *
 * Called with PHY lock held.
 */
static boolean_t
pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	char *dtype;

	ASSERT(mutex_owned(&pptr->phy_lock));

	/*
	 * Mark this device as no longer changed.
	 */
	pptr->changed = 0;

	/*
	 * If we don't have a device handle, get one.
3366 */ 3367 if (pmcs_get_device_handle(pwp, pptr)) { 3368 return (B_FALSE); 3369 } 3370 3371 pptr->configured = 1; 3372 3373 switch (pptr->dtype) { 3374 case SAS: 3375 dtype = "SAS"; 3376 break; 3377 case SATA: 3378 dtype = "SATA"; 3379 break; 3380 case EXPANDER: 3381 dtype = "SMP"; 3382 break; 3383 default: 3384 dtype = "???"; 3385 } 3386 3387 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "config_dev: %s " 3388 "dev %s " SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path, 3389 SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate); 3390 3391 return (B_TRUE); 3392 } 3393 3394 /* 3395 * Called with PHY locked 3396 */ 3397 static void 3398 pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport) 3399 { 3400 pmcs_phy_t *ctmp, *clist = NULL, *cnext; 3401 int result, i, nphy = 0; 3402 boolean_t root_phy = B_FALSE; 3403 3404 ASSERT(iport); 3405 3406 /* 3407 * Step 1- clear our "changed" bit. If we need to retry/restart due 3408 * to resource shortages, we'll set it again. While we're doing 3409 * configuration, other events may set it again as well. If the PHY 3410 * is a root PHY and is currently marked as having changed, reset the 3411 * config_stop timer as well. 3412 */ 3413 if (IS_ROOT_PHY(pptr) && pptr->changed) { 3414 pptr->config_stop = ddi_get_lbolt() + 3415 drv_usectohz(PMCS_MAX_CONFIG_TIME); 3416 } 3417 pptr->changed = 0; 3418 3419 /* 3420 * Step 2- make sure we don't overflow 3421 */ 3422 if (pptr->level == PMCS_MAX_XPND-1) { 3423 pmcs_prt(pwp, PMCS_PRT_WARN, pptr, NULL, 3424 "%s: SAS expansion tree too deep", __func__); 3425 return; 3426 } 3427 3428 /* 3429 * Step 3- Check if this expander is part of a wide phy that has 3430 * already been configured. 3431 * 3432 * This is known by checking this level for another EXPANDER device 3433 * with the same SAS address and isn't already marked as a subsidiary 3434 * phy and a parent whose SAS address is the same as our SAS address 3435 * (if there are parents). 
 */
	if (!IS_ROOT_PHY(pptr)) {
		/*
		 * No need to lock the parent here because we're in discovery
		 * and the only time a PHY's children pointer can change is
		 * in discovery; either in pmcs_clear_expander (which has
		 * already been called) or here, down below. Plus, trying to
		 * grab the parent's lock here can cause deadlock.
		 */
		ctmp = pptr->parent->children;
	} else {
		ctmp = pwp->root_phys;
		root_phy = B_TRUE;
	}

	while (ctmp) {
		/*
		 * If we've checked all PHYs up to pptr, we stop. Otherwise,
		 * we'll be checking for a primary PHY with a higher PHY
		 * number than pptr, which will never happen. The primary
		 * PHY on non-root expanders will ALWAYS be the lowest
		 * numbered PHY.
		 */
		if (ctmp == pptr) {
			break;
		}

		/*
		 * If pptr and ctmp are root PHYs, just grab the mutex on
		 * ctmp. No need to lock the entire tree. If they are not
		 * root PHYs, there is no need to lock since a non-root PHY's
		 * SAS address and other characteristics can only change in
		 * discovery anyway.
		 */
		if (root_phy) {
			mutex_enter(&ctmp->phy_lock);
		}

		if (ctmp->dtype == EXPANDER && ctmp->width &&
		    memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
			int widephy = 0;
			/*
			 * If these phys are not root PHYs, also require that
			 * their parents' SAS addresses match before treating
			 * them as part of the same wide port.
			 */
			if (!root_phy) {
				if (memcmp(ctmp->parent->sas_address,
				    pptr->parent->sas_address, 8) == 0) {
					widephy = 1;
				}
			} else {
				widephy = 1;
			}
			if (widephy) {
				ctmp->width++;
				pptr->subsidiary = 1;

				/*
				 * Update the primary PHY's attached-port-pm
				 * and target-port-pm information with the info
				 * from this subsidiary
				 */
				pmcs_update_phy_pm_props(ctmp,
				    pptr->att_port_pm_tmp,
				    pptr->tgt_port_pm_tmp, B_TRUE);

				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: PHY %s part of wide PHY %s "
				    "(now %d wide)", __func__, pptr->path,
				    ctmp->path, ctmp->width);
				if (root_phy) {
					mutex_exit(&ctmp->phy_lock);
				}
				return;
			}
		}

		cnext = ctmp->sibling;
		if (root_phy) {
			mutex_exit(&ctmp->phy_lock);
		}
		ctmp = cnext;
	}

	/*
	 * Step 4- If we don't have a device handle, get one.  Since this
	 * is the primary PHY, make sure subsidiary is cleared.
	 */
	pptr->subsidiary = 0;
	pptr->iport = iport;
	if (pmcs_get_device_handle(pwp, pptr)) {
		goto out;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Config expander %s "
	    SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	/*
	 * Step 5- figure out how many phys are in this expander.
	 * A return of 0 means a retryable failure: retry until the
	 * config_stop deadline, then give up and kill the subtree.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		goto out;
	}

	/*
	 * Step 6- Allocate a list of phys for this expander and figure out
	 * what each one is.
	 */
	for (i = 0; i < nphy; i++) {
		ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
		bzero(ctmp, sizeof (pmcs_phy_t));
		ctmp->device_id = PMCS_INVALID_DEVICE_ID;
		ctmp->sibling = clist;
		ctmp->pend_dtype = NEW;	/* Init pending dtype */
		ctmp->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
		clist = ctmp;
	}

	/*
	 * If the configuration changed while we slept in the allocation
	 * loop above, throw away the new PHYs and restart discovery.
	 */
	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		/*
		 * Clean up the newly allocated PHYs and return
		 */
		while (clist) {
			ctmp = clist->sibling;
			clist->target_addr = NULL;
			kmem_cache_free(pwp->phy_cache, clist);
			clist = ctmp;
		}
		return;
	}
	mutex_exit(&pwp->config_lock);

	/*
	 * Step 7- Now fill in the rest of the static portions of the phy.
	 * Each new child is left locked here; they are unlocked either on
	 * the "out" error path below or later in discovery.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		ctmp->parent = pptr;
		ctmp->pwp = pwp;
		ctmp->level = pptr->level+1;
		ctmp->portid = pptr->portid;
		if (ctmp->tolerates_sas2) {
			ASSERT(i < SAS2_PHYNUM_MAX);
			ctmp->phynum = i & SAS2_PHYNUM_MASK;
		} else {
			ASSERT(i < SAS_PHYNUM_MAX);
			ctmp->phynum = i & SAS_PHYNUM_MASK;
		}
		pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
		pmcs_lock_phy(ctmp);
	}

	/*
	 * Step 8- Discover things about each phy in the expander.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		result = pmcs_expander_content_discover(pwp, pptr, ctmp);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}
			goto out;
		}

		/* Set pend_dtype to dtype for 1st time initialization */
		ctmp->pend_dtype = ctmp->dtype;
	}

	/*
	 * Step 9: Install the new list on the next level. There should
	 * typically be no children pointer on this PHY. There is one known
	 * case where this can happen, though. If a root PHY goes down and
	 * comes back up before discovery can run, we will fail to remove the
	 * children from that PHY since it will no longer be marked dead.
	 * However, in this case, all children should also be marked dead. If
	 * we see that, take those children and put them on the dead_phys list.
	 */
	if (pptr->children != NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Expander @ %s still has children: Clean up",
		    __func__, pptr->path);
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	/*
	 * Set the new children pointer for this expander
	 */
	pptr->children = clist;
	clist = NULL;
	pptr->ncphy = nphy;
	pptr->configured = 1;

	/*
	 * We only set width if we're greater than level 0.
	 */
	if (pptr->level) {
		pptr->width = 1;
	}

	/*
	 * Now tell the rest of the world about us, as an SMP node.
	 */
	pptr->iport = iport;
	pmcs_new_tport(pwp, pptr);

out:
	/*
	 * Error path: unlock and free any children still on the local
	 * allocation list (clist is non-NULL here only if we bailed before
	 * installing the list on pptr in Step 9).
	 */
	while (clist) {
		ctmp = clist->sibling;
		pmcs_unlock_phy(clist);
		clist->target_addr = NULL;
		kmem_cache_free(pwp->phy_cache, clist);
		clist = ctmp;
	}
}

/*
 * 2. Check expanders marked changed (but not dead) to see if they still have
 * the same number of phys and the same SAS address. Mark them, their subsidiary
 * phys (if wide) and their descendents dead if anything has changed. Check
 * the devices they contain to see if *they* have changed. If they've changed
 * from type NOTHING we leave them marked changed to be configured later
 * (picking up a new SAS address and link rate if possible). Otherwise, any
 * change in type, SAS address or removal of target role will cause us to
 * mark them (and their descendents) as dead and cause any pending commands
 * and associated devices to be removed.
 *
 * Called with PHY (pptr) locked.
 */

static void
pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int nphy, result;
	pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
	boolean_t kill_changed, changed;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: check %s", __func__, pptr->path);

	/*
	 * Step 1: Mark phy as not changed. We will mark it changed if we need
	 * to retry.
	 */
	pptr->changed = 0;

	/*
	 * Reset the config_stop time. Although we're not actually configuring
	 * anything here, we do want some indication of when to give up trying
	 * if we can't communicate with the expander.
	 */
	pptr->config_stop = ddi_get_lbolt() +
	    drv_usectohz(PMCS_MAX_CONFIG_TIME);

	/*
	 * Step 2: Figure out how many phys are in this expander. If
	 * pmcs_expander_get_nphy returns 0 we ran out of resources,
	 * so reschedule and try later.
If it returns another error,
	 * just return.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pptr->config_stop = 0;
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return;
	}

	/*
	 * Step 3: If the number of phys don't agree, kill the old sub-tree.
	 */
	if (nphy != pptr->ncphy) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: number of contained phys for %s changed from %d to %d",
		    __func__, pptr->path, pptr->ncphy, nphy);
		/*
		 * Force a rescan of this expander after dead contents
		 * are cleared and removed.
		 */
		pmcs_kill_changed(pwp, pptr, 0);
		return;
	}

	/*
	 * Step 4: if we're at the bottom of the stack, we're done
	 * (we can't have any levels below us)
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		return;
	}

	/*
	 * Step 5: Discover things about each phy in this expander.  We do
	 * this by walking the current list of contained phys and doing a
	 * content discovery for it to a local phy.
	 */
	ctmp = pptr->children;
	ASSERT(ctmp);
	/* Defensive check for non-DEBUG builds where ASSERT compiles away. */
	if (ctmp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: No children attached to expander @ %s?", __func__,
		    pptr->path);
		return;
	}

	while (ctmp) {
		/*
		 * Allocate a local PHY to contain the proposed new contents
		 * and link it to the rest of the local PHYs so that they
		 * can all be freed later.
		 */
		local = pmcs_clone_phy(ctmp);

		if (local_list == NULL) {
			local_list = local;
			local_tail = local;
		} else {
			local_tail->sibling = local;
			local_tail = local;
		}

		/*
		 * Need to lock the local PHY since pmcs_expander_content_
		 * discovery may call pmcs_clear_phy on it, which expects
		 * the PHY to be locked.
		 */
		pmcs_lock_phy(local);
		result = pmcs_expander_content_discover(pwp, pptr, local);
		pmcs_unlock_phy(local);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}

			/*
			 * Release all the local PHYs that we allocated.
			 */
			pmcs_free_phys(pwp, local_list);
			return;
		}

		ctmp = ctmp->sibling;
	}

	/*
	 * Step 6: Compare the local PHY's contents to our current PHY.  If
	 * there are changes, take the appropriate action.
	 * This is done in two steps (step 5 above, and 6 here) so that if we
	 * have to bail during this process (e.g. pmcs_expander_content_discover
	 * fails), we haven't actually changed the state of any of the real
	 * PHYs.  Next time we come through here, we'll be starting over from
	 * scratch.  This keeps us from marking a changed PHY as no longer
	 * changed, but then having to bail only to come back next time and
	 * think that the PHY hadn't changed.  If this were to happen, we
	 * would fail to properly configure the device behind this PHY.
	 */
	local = local_list;
	ctmp = pptr->children;

	while (ctmp) {
		changed = B_FALSE;
		kill_changed = B_FALSE;

		/*
		 * We set local to local_list prior to this loop so that we
		 * can simply walk the local_list while we walk this list.  The
		 * two lists should be completely in sync.
		 *
		 * Clear the changed flag here.
		 */
		ctmp->changed = 0;

		if (ctmp->dtype != local->dtype) {
			if (ctmp->dtype != NOTHING) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from %s to %s "
				    "(killing)", __func__, ctmp->path,
				    PHY_TYPE(ctmp), PHY_TYPE(local));
				/*
				 * Force a rescan of this expander after dead
				 * contents are cleared and removed.
				 */
				changed = B_TRUE;
				kill_changed = B_TRUE;
			} else {
				changed = B_TRUE;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from NOTHING to %s",
				    __func__, ctmp->path, PHY_TYPE(local));
				/*
				 * Since this PHY was nothing and is now
				 * something, reset the config_stop timer.
				 */
				ctmp->config_stop = ddi_get_lbolt() +
				    drv_usectohz(PMCS_MAX_CONFIG_TIME);
			}

		} else if (ctmp->atdt != local->atdt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, "%s: "
			    "%s attached device type changed from %d to %d "
			    "(killing)", __func__, ctmp->path, ctmp->atdt,
			    local->atdt);
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;

			if (local->atdt == 0) {
				kill_changed = B_TRUE;
			}
		} else if (ctmp->link_rate != local->link_rate) {
			pmcs_prt(pwp, PMCS_PRT_INFO, ctmp, NULL, "%s: %s "
			    "changed speed from %s to %s", __func__, ctmp->path,
			    pmcs_get_rate(ctmp->link_rate),
			    pmcs_get_rate(local->link_rate));
			/* If the speed changed from invalid, force rescan */
			if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
				changed = B_TRUE;
				RESTART_DISCOVERY(pwp);
			} else {
				/* Just update to the new link rate */
				ctmp->link_rate = local->link_rate;
			}

			if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
				kill_changed = B_TRUE;
			}
		} else if (memcmp(ctmp->sas_address, local->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: SAS Addr for %s changed from " SAS_ADDR_FMT
			    "to " SAS_ADDR_FMT " (kill old tree)", __func__,
			    ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
			    SAS_ADDR_PRT(local->sas_address));
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: %s looks the same (type %s)",
			    __func__, ctmp->path, PHY_TYPE(ctmp));
			/*
			 * If EXPANDER, still mark it changed so we
			 * re-evaluate its contents.  If it's not an expander,
			 * but it hasn't been configured, also mark it as
			 * changed so that it will undergo configuration.
			 */
			if (ctmp->dtype == EXPANDER) {
				changed = B_TRUE;
			} else if ((ctmp->dtype != NOTHING) &&
			    !ctmp->configured) {
				ctmp->changed = 1;
			} else {
				/* It simply hasn't changed */
				ctmp->changed = 0;
			}
		}

		/*
		 * If the PHY changed, call pmcs_kill_changed if indicated,
		 * update its contents to reflect its current state and mark it
		 * as changed.
		 */
		if (changed) {
			/*
			 * pmcs_kill_changed will mark the PHY as changed, so
			 * only do PHY_CHANGED if we did not do kill_changed.
			 */
			if (kill_changed) {
				pmcs_kill_changed(pwp, ctmp, 0);
			} else {
				/*
				 * If we're not killing the device, it's not
				 * dead.  Mark the PHY as changed.
				 */
				PHY_CHANGED(pwp, ctmp);

				if (ctmp->dead) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    ctmp, NULL, "%s: Unmarking PHY %s "
					    "dead, restarting discovery",
					    __func__, ctmp->path);
					ctmp->dead = 0;
					RESTART_DISCOVERY(pwp);
				}
			}

			/*
			 * If the dtype of this PHY is now NOTHING, mark it as
			 * unconfigured.  Set pend_dtype to what the new dtype
			 * is.  It'll get updated at the end of the discovery
			 * process.
			 */
			if (local->dtype == NOTHING) {
				bzero(ctmp->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = 0;
				ctmp->link_rate = 0;
				ctmp->pend_dtype = NOTHING;
				ctmp->configured = 0;
			} else {
				(void) memcpy(ctmp->sas_address,
				    local->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = local->atdt;
				ctmp->link_rate = local->link_rate;
				ctmp->pend_dtype = local->dtype;
			}
		}

		local = local->sibling;
		ctmp = ctmp->sibling;
	}

	/*
	 * If we got to here, that means we were able to see all the PHYs
	 * and we can now update all of the real PHYs with the information
	 * we got on the local PHYs.  Once that's done, free all the local
	 * PHYs.
	 */

	pmcs_free_phys(pwp, local_list);
}

/*
 * Top level routine to check expanders.  We call pmcs_check_expander for
 * each expander.  Since we're not doing any configuration right now, it
 * doesn't matter if this is breadth-first.
 */
static void
pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *phyp, *pnext, *pchild;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: %s", __func__, pptr->path);

	/*
	 * Check each expander at this level.  Only configured, primary
	 * (non-subsidiary), live expanders marked "changed" are checked.
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);

		if ((phyp->dtype == EXPANDER) && phyp->changed &&
		    !phyp->dead && !phyp->subsidiary &&
		    phyp->configured) {
			pmcs_check_expander(pwp, phyp);
		}

		pnext = phyp->sibling;
		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	/*
	 * Now check the children.  Snapshot sibling/children pointers under
	 * the PHY lock, then recurse without holding it.
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);
		pnext = phyp->sibling;
		pchild = phyp->children;
		pmcs_unlock_phy(phyp);

		if (pchild) {
			pmcs_check_expanders(pwp, pchild);
		}

		phyp = pnext;
	}
}

/*
 * pmcs_clear_expander
 *
 * Recursively clear a dead expander: mark its children dead, move them to
 * the dead_phys list, clear any subsidiary (wide-port) PHYs at the same
 * level that share this expander's SAS address, and finally clear the
 * expander PHY itself.  "level" is the recursion depth (0 at the top).
 *
 * Called with softstate and PHY locked.
 */
static void
pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
{
	pmcs_phy_t *ctmp;

	ASSERT(mutex_owned(&pwp->lock));
	ASSERT(mutex_owned(&pptr->phy_lock));
	ASSERT(pptr->level < PMCS_MAX_XPND - 1);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: checking %s", __func__, pptr->path);

	ctmp = pptr->children;
	while (ctmp) {
		/*
		 * If the expander is dead, mark its children dead
		 */
		if (pptr->dead) {
			ctmp->dead = 1;
		}
		if (ctmp->dtype == EXPANDER) {
			pmcs_clear_expander(pwp, ctmp, level + 1);
		}
		ctmp = ctmp->sibling;
	}

	/*
	 * If this expander is not dead, we're done here.
	 */
	if (!pptr->dead) {
		return;
	}

	/*
	 * Now snip out the list of children below us and release them
	 */
	if (pptr->children) {
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	pptr->children = NULL;

	/*
	 * Clear subsidiary phys as well.  Getting the parent's PHY lock
	 * is only necessary if level == 0 since otherwise the parent is
	 * already locked.
	 */
	if (!IS_ROOT_PHY(pptr)) {
		if (level == 0) {
			mutex_enter(&pptr->parent->phy_lock);
		}
		ctmp = pptr->parent->children;
		if (level == 0) {
			mutex_exit(&pptr->parent->phy_lock);
		}
	} else {
		ctmp = pwp->root_phys;
	}

	while (ctmp) {
		if (ctmp == pptr) {
			ctmp = ctmp->sibling;
			continue;
		}
		/*
		 * We only need to lock subsidiary PHYs on the level 0
		 * expander.  Any children of that expander, subsidiaries or
		 * not, will already be locked.
		 */
		if (level == 0) {
			pmcs_lock_phy(ctmp);
		}
		/*
		 * Skip anything that isn't a subsidiary expander PHY with
		 * the same SAS address as the one being cleared.
		 */
		if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
		    memcmp(ctmp->sas_address, pptr->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			if (level == 0) {
				pmcs_unlock_phy(ctmp);
			}
			ctmp = ctmp->sibling;
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: subsidiary %s", __func__, ctmp->path);
		pmcs_clear_phy(pwp, ctmp);
		if (level == 0) {
			pmcs_unlock_phy(ctmp);
		}
		ctmp = ctmp->sibling;
	}

	pmcs_clear_phy(pwp, pptr);
}

/*
 * Called with PHY locked and with scratch acquired.  We return 0 if
 * we fail to allocate resources or notice that the configuration
 * count changed while we were running the command.  We return
 * less than zero if we had an I/O error or received an unsupported
 * configuration.  Otherwise we return the number of phys in the
 * expander.
 */
#define	DFM(m, y)	if (m == NULL) m = y
static int
pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	char buf[64];
	const uint_t rdoff = 0x100;	/* returned data offset */
	smp_response_frame_t *srf;
	smp_report_general_resp_t *srgr;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
	int result = 0;

	/*
	 * Initial SMP REPORT GENERAL request word.  The 0x..11.. byte is a
	 * SAS2-style allocation length; if the expander rejects it (see the
	 * srf_result == 3 check below), we clear that byte and retry as a
	 * SAS1.1-style request.
	 */
	ival = 0x40001100;

again:
	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = pptr->dtype;
	pwrk->xp = pptr->target;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, NULL,
		    "%s: GET_IQ_ENTRY failed", __func__);
		pmcs_pwork(pwp, pwrk);
		goto out;
	}

	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
	 */
	msg[4] = BE_32(ival);
	msg[5] = 0;
	msg[6] = 0;
	msg[7] = 0;
	msg[8] = 0;
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	/*
	 * Hold the iport and serialize SMP access across the command;
	 * the PHY lock is dropped while we wait for completion.
	 */
	pmcs_hold_iport(pptr->iport);
	iport = pptr->iport;
	pmcs_smp_acquire(iport);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	if (result) {
		/* Command timed out: abort it and report a retryable 0. */
		pmcs_timed_out(pwp, htag, __func__);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
		if (pmcs_abort(pwp, pptr, htag, 0, 1)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: SMP ABORT failed for cmd (htag 0x%08x)",
			    __func__, htag);
		}
		pmcs_smp_release(iport);
		pmcs_rele_iport(iport);
		result = 0;
		goto out;
	}
	pmcs_smp_release(iport);
	pmcs_rele_iport(iport);

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		result = 0;
		goto out;
	}
	mutex_exit(&pwp->config_lock);

	ptr = (void *)pwp->scratch;
	status = LE_32(ptr[2]);
	if (status == PMCOUT_STATUS_UNDERFLOW ||
	    status == PMCOUT_STATUS_OVERFLOW) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
		    "%s: over/underflow", __func__);
		status = PMCOUT_STATUS_OK;
	}
	srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
	srgr = (smp_report_general_resp_t *)
	    &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];

	if (status != PMCOUT_STATUS_OK) {
		char *nag = NULL;
		(void) snprintf(buf, sizeof (buf),
		    "%s: SMP op failed (0x%x)", __func__, status);
		/*
		 * The retryable cases below deliberately fall through so
		 * that DFM records only the first (most specific) reason.
		 */
		switch (status) {
		case PMCOUT_STATUS_IO_PORT_IN_RESET:
			DFM(nag, "I/O Port In Reset");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
			DFM(nag, "Hardware Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
			DFM(nag, "Internal SMP Resource Failure");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
			DFM(nag, "PHY Not Ready");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
			DFM(nag, "Connection Rate Not Supported");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
			DFM(nag, "Open Retry Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
			DFM(nag, "HW Resource Busy");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
			DFM(nag, "Response Connection Error");
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: expander %s SMP operation failed (%s)",
			    __func__, pptr->path, nag);
			break;

		/*
		 * For the IO_DS_NON_OPERATIONAL case, we need to kick off
		 * device state recovery and return 0 so that the caller
		 * doesn't assume this expander is dead for good.
		 */
		case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
			pmcs_xscsi_t *xp = pptr->target;

			pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, pptr, xp,
			    "%s: expander %s device state non-operational",
			    __func__, pptr->path);

			if (xp == NULL) {
				/*
				 * Kick off recovery right now.
				 */
				SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
				(void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
				    pwp, DDI_NOSLEEP);
			} else {
				mutex_enter(&xp->statlock);
				pmcs_start_dev_state_recovery(xp, pptr);
				mutex_exit(&xp->statlock);
			}

			break;
		}

		default:
			pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
			result = -EIO;
			break;
		}
	} else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response frame type 0x%x",
		    __func__, srf->srf_frame_type);
		result = -EINVAL;
	} else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response function 0x%x",
		    __func__, srf->srf_function);
		result = -EINVAL;
	} else if (srf->srf_result != 0) {
		/*
		 * Check to see if we have a value of 3 for failure and
		 * whether we were using a SAS2.0 allocation length value
		 * and retry without it.
		 */
		if (srf->srf_result == 3 && (ival & 0xff00)) {
			ival &= ~0xff00;
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: err 0x%x with SAS2 request- retry with SAS1",
			    __func__, srf->srf_result);
			goto again;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response 0x%x", __func__, srf->srf_result);
		result = -EINVAL;
	} else if (srgr->srgr_configuring) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: expander at phy %s is still configuring",
		    __func__, pptr->path);
		result = 0;
	} else {
		result = srgr->srgr_number_of_phys;
		/* A successful SAS2-style exchange marks SAS2 tolerance. */
		if (ival & 0xff00) {
			pptr->tolerates_sas2 = 1;
		}
		/*
		 * Save off the REPORT_GENERAL response
		 */
		bcopy(srgr, &pptr->rg_resp, sizeof (smp_report_general_resp_t));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s has %d phys and %s SAS2", pptr->path, result,
		    pptr->tolerates_sas2? "tolerates" : "does not tolerate");
	}
out:
	return (result);
}

/*
 * Called with expander locked (and thus, pptr) as well as all PHYs up to
 * the root, and scratch acquired. Return 0 if we fail to allocate resources
 * or notice that the configuration changed while we were running the command.
 *
 * We return less than zero if we had an I/O error or received an
 * unsupported configuration.
 */
static int
pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
    pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	char buf[64];
	uint8_t sas_address[8];
	uint8_t att_sas_address[8];
	smp_response_frame_t *srf;
	smp_discover_resp_t *sdr;
	const uint_t rdoff = 0x100;	/* returned data offset */
	uint8_t *roff;
	uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
	int result = 0;
	uint8_t ini_support;
	uint8_t tgt_support;

	if (!expander->iport || !expander->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, expander, expander->target,
		    "%s: Can't reach PHY %s", __func__, expander->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = expander->dtype;
	pwrk->xp = expander->target;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(expander->device_id);
	msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
	    SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
4423 */ 4424 if (expander->tolerates_sas2) { 4425 msg[4] = BE_32(0x40101B00); 4426 } else { 4427 msg[4] = BE_32(0x40100000); 4428 } 4429 msg[5] = 0; 4430 msg[6] = BE_32((pptr->phynum << 16)); 4431 msg[7] = 0; 4432 msg[8] = 0; 4433 msg[9] = 0; 4434 msg[10] = 0; 4435 msg[11] = 0; 4436 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff)); 4437 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff)); 4438 msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff); 4439 msg[15] = 0; 4440 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4441 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4442 if (ptr == NULL) { 4443 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4444 goto out; 4445 } 4446 4447 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE); 4448 4449 pmcs_hold_iport(expander->iport); 4450 iport = expander->iport; 4451 pmcs_smp_acquire(iport); 4452 pwrk->state = PMCS_WORK_STATE_ONCHIP; 4453 htag = pwrk->htag; 4454 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4455 pmcs_unlock_phy(expander); 4456 WAIT_FOR(pwrk, 1000, result); 4457 pmcs_pwork(pwp, pwrk); 4458 pmcs_lock_phy(expander); 4459 if (result) { 4460 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4461 "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag); 4462 if (pmcs_abort(pwp, pptr, htag, 0, 1)) { 4463 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4464 "%s: SMP ABORT failed for cmd (htag 0x%08x)", 4465 __func__, htag); 4466 } 4467 pmcs_smp_release(iport); 4468 pmcs_rele_iport(iport); 4469 result = -ETIMEDOUT; 4470 goto out; 4471 } 4472 pmcs_smp_release(iport); 4473 pmcs_rele_iport(iport); 4474 4475 mutex_enter(&pwp->config_lock); 4476 if (pwp->config_changed) { 4477 RESTART_DISCOVERY_LOCKED(pwp); 4478 mutex_exit(&pwp->config_lock); 4479 result = 0; 4480 goto out; 4481 } 4482 4483 mutex_exit(&pwp->config_lock); 4484 ptr = (void *)pwp->scratch; 4485 /* 4486 * Point roff to the DMA offset for returned data 4487 */ 4488 roff = pwp->scratch; 4489 roff += rdoff; 4490 srf = (smp_response_frame_t *)roff; 4491 sdr = (smp_discover_resp_t *)(roff+4); 4492 status = LE_32(ptr[2]); 
4493 if (status == PMCOUT_STATUS_UNDERFLOW || 4494 status == PMCOUT_STATUS_OVERFLOW) { 4495 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL, 4496 "%s: over/underflow", __func__); 4497 status = PMCOUT_STATUS_OK; 4498 } 4499 if (status != PMCOUT_STATUS_OK) { 4500 char *nag = NULL; 4501 (void) snprintf(buf, sizeof (buf), 4502 "%s: SMP op failed (0x%x)", __func__, status); 4503 switch (status) { 4504 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 4505 DFM(nag, "Hardware Timeout"); 4506 /* FALLTHROUGH */ 4507 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 4508 DFM(nag, "Internal SMP Resource Failure"); 4509 /* FALLTHROUGH */ 4510 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 4511 DFM(nag, "PHY Not Ready"); 4512 /* FALLTHROUGH */ 4513 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 4514 DFM(nag, "Connection Rate Not Supported"); 4515 /* FALLTHROUGH */ 4516 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 4517 DFM(nag, "Open Retry Timeout"); 4518 /* FALLTHROUGH */ 4519 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 4520 DFM(nag, "HW Resource Busy"); 4521 /* FALLTHROUGH */ 4522 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR: 4523 DFM(nag, "Response Connection Error"); 4524 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4525 "%s: expander %s SMP operation failed (%s)", 4526 __func__, pptr->path, nag); 4527 break; 4528 default: 4529 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr); 4530 result = -EIO; 4531 break; 4532 } 4533 goto out; 4534 } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) { 4535 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4536 "%s: bad response frame type 0x%x", 4537 __func__, srf->srf_frame_type); 4538 result = -EINVAL; 4539 goto out; 4540 } else if (srf->srf_function != SMP_FUNC_DISCOVER) { 4541 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4542 "%s: bad response function 0x%x", 4543 __func__, srf->srf_function); 4544 result = -EINVAL; 4545 goto out; 4546 } else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) { 4547 result = 
pmcs_smp_function_result(pwp, srf); 4548 /* Need not fail if PHY is Vacant */ 4549 if (result != SMP_RES_PHY_VACANT) { 4550 result = -EINVAL; 4551 goto out; 4552 } 4553 } 4554 4555 /* 4556 * Save off the DISCOVER response 4557 */ 4558 bcopy(sdr, &pptr->disc_resp, sizeof (smp_discover_resp_t)); 4559 4560 ini_support = (sdr->sdr_attached_sata_host | 4561 (sdr->sdr_attached_smp_initiator << 1) | 4562 (sdr->sdr_attached_stp_initiator << 2) | 4563 (sdr->sdr_attached_ssp_initiator << 3)); 4564 4565 tgt_support = (sdr->sdr_attached_sata_device | 4566 (sdr->sdr_attached_smp_target << 1) | 4567 (sdr->sdr_attached_stp_target << 2) | 4568 (sdr->sdr_attached_ssp_target << 3)); 4569 4570 pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address); 4571 pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address); 4572 4573 pptr->virtual = sdr->sdr_virtual_phy; 4574 4575 /* 4576 * Set the routing attribute regardless of the PHY type. 4577 */ 4578 pptr->routing_attr = sdr->sdr_routing_attr; 4579 4580 switch (sdr->sdr_attached_device_type) { 4581 case SAS_IF_DTYPE_ENDPOINT: 4582 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4583 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4584 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4585 pptr->path, 4586 sdr->sdr_attached_device_type, 4587 sdr->sdr_negotiated_logical_link_rate, 4588 ini_support, 4589 tgt_support, 4590 SAS_ADDR_PRT(sas_address), 4591 SAS_ADDR_PRT(att_sas_address), 4592 sdr->sdr_attached_phy_identifier); 4593 4594 if (sdr->sdr_attached_sata_device || 4595 sdr->sdr_attached_stp_target) { 4596 pptr->dtype = SATA; 4597 } else if (sdr->sdr_attached_ssp_target) { 4598 pptr->dtype = SAS; 4599 } else if (tgt_support || ini_support) { 4600 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4601 "%s: %s has tgt support=%x init support=(%x)", 4602 __func__, pptr->path, tgt_support, ini_support); 4603 } 4604 4605 switch (pptr->routing_attr) { 4606 case SMP_ROUTING_SUBTRACTIVE: 4607 case SMP_ROUTING_TABLE: 4608 case 
SMP_ROUTING_DIRECT: 4609 pptr->routing_method = SMP_ROUTING_DIRECT; 4610 break; 4611 default: 4612 pptr->routing_method = 0xff; /* Invalid method */ 4613 break; 4614 } 4615 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4616 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4617 break; 4618 case SAS_IF_DTYPE_EDGE: 4619 case SAS_IF_DTYPE_FANOUT: 4620 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4621 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4622 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4623 pptr->path, 4624 sdr->sdr_attached_device_type, 4625 sdr->sdr_negotiated_logical_link_rate, 4626 ini_support, 4627 tgt_support, 4628 SAS_ADDR_PRT(sas_address), 4629 SAS_ADDR_PRT(att_sas_address), 4630 sdr->sdr_attached_phy_identifier); 4631 if (sdr->sdr_attached_smp_target) { 4632 /* 4633 * Avoid configuring phys that just point back 4634 * at a parent phy 4635 */ 4636 if (expander->parent && 4637 memcmp(expander->parent->sas_address, 4638 att_sas_address, 4639 sizeof (expander->parent->sas_address)) == 0) { 4640 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, NULL, 4641 "%s: skipping port back to parent " 4642 "expander (%s)", __func__, pptr->path); 4643 pptr->dtype = NOTHING; 4644 break; 4645 } 4646 pptr->dtype = EXPANDER; 4647 4648 } else if (tgt_support || ini_support) { 4649 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4650 "%s has tgt support=%x init support=(%x)", 4651 pptr->path, tgt_support, ini_support); 4652 pptr->dtype = EXPANDER; 4653 } 4654 if (pptr->routing_attr == SMP_ROUTING_DIRECT) { 4655 pptr->routing_method = 0xff; /* Invalid method */ 4656 } else { 4657 pptr->routing_method = pptr->routing_attr; 4658 } 4659 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4660 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4661 break; 4662 default: 4663 pptr->dtype = NOTHING; 4664 break; 4665 } 4666 if (pptr->dtype != NOTHING) { 4667 pmcs_phy_t *ctmp; 4668 4669 /* 4670 * If the attached device is a SATA device and the expander 4671 * is 
(possibly) a SAS2 compliant expander, check for whether 4672 * there is a NAA=5 WWN field starting at this offset and 4673 * use that for the SAS Address for this device. 4674 */ 4675 if (expander->tolerates_sas2 && pptr->dtype == SATA && 4676 (roff[SAS_ATTACHED_NAME_OFFSET] >> 8) == NAA_IEEE_REG) { 4677 (void) memcpy(pptr->sas_address, 4678 &roff[SAS_ATTACHED_NAME_OFFSET], 8); 4679 } else { 4680 (void) memcpy(pptr->sas_address, att_sas_address, 8); 4681 } 4682 pptr->atdt = (sdr->sdr_attached_device_type); 4683 /* 4684 * Now run up from the expander's parent up to the top to 4685 * make sure we only use the least common link_rate. 4686 */ 4687 for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) { 4688 if (ctmp->link_rate < 4689 sdr->sdr_negotiated_logical_link_rate) { 4690 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4691 "%s: derating link rate from %x to %x due " 4692 "to %s being slower", pptr->path, 4693 sdr->sdr_negotiated_logical_link_rate, 4694 ctmp->link_rate, 4695 ctmp->path); 4696 sdr->sdr_negotiated_logical_link_rate = 4697 ctmp->link_rate; 4698 } 4699 } 4700 pptr->link_rate = sdr->sdr_negotiated_logical_link_rate; 4701 pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate; 4702 pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate; 4703 pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate; 4704 pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate; 4705 PHY_CHANGED(pwp, pptr); 4706 } else { 4707 pmcs_clear_phy(pwp, pptr); 4708 } 4709 result = 1; 4710 out: 4711 return (result); 4712 } 4713 4714 /* 4715 * Get a work structure and assign it a tag with type and serial number 4716 * If a structure is returned, it is returned locked. 
/*
 * Get a work structure and assign it a tag with type and serial number.
 * If a structure is returned, it is returned locked.
 *
 * Returns NULL only when both the free list and the pending-free list
 * are empty.
 */
pmcwork_t *
pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp)
{
	pmcwork_t *p;
	uint16_t snum;
	uint32_t off;

	mutex_enter(&pwp->wfree_lock);
	p = STAILQ_FIRST(&pwp->wf);
	if (p == NULL) {
		/*
		 * If we couldn't get a work structure, it's time to bite
		 * the bullet, grab the pfree_lock and copy over all the
		 * work structures from the pending free list to the actual
		 * free list (assuming it's not also empty).
		 */
		mutex_enter(&pwp->pfree_lock);
		if (STAILQ_FIRST(&pwp->pf) == NULL) {
			/* Both lists exhausted; caller must cope with NULL. */
			mutex_exit(&pwp->pfree_lock);
			mutex_exit(&pwp->wfree_lock);
			return (NULL);
		}
		/*
		 * Splice the entire pending-free list onto the free list
		 * in O(1) by copying the STAILQ head/tail pointers, then
		 * reset the pending-free list to empty.
		 */
		pwp->wf.stqh_first = pwp->pf.stqh_first;
		pwp->wf.stqh_last = pwp->pf.stqh_last;
		STAILQ_INIT(&pwp->pf);
		mutex_exit(&pwp->pfree_lock);

		p = STAILQ_FIRST(&pwp->wf);
		ASSERT(p != NULL);
	}
	STAILQ_REMOVE(&pwp->wf, p, pmcwork, next);
	/* Serial number is drawn while still holding wfree_lock. */
	snum = pwp->wserno++;
	mutex_exit(&pwp->wfree_lock);

	/* Index of this work structure within the pwp->work array. */
	off = p - pwp->work;

	mutex_enter(&p->lock);
	ASSERT(p->state == PMCS_WORK_STATE_NIL);
	ASSERT(p->htag == PMCS_TAG_FREE);
	/* Compose the host tag from tag type, serial number and index. */
	p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK;
	p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK);
	p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK);
	p->start = gethrtime();
	p->state = PMCS_WORK_STATE_READY;
	p->ssp_event = 0;
	p->dead = 0;

	if (phyp) {
		/* Bind the PHY and hold a reference for the work's lifetime. */
		p->phy = phyp;
		pmcs_inc_phy_ref_count(phyp);
	}

	return (p);
}
/*
 * Return a work structure to a free list.
 * Called with pwrk lock held. Returned with lock released.
 */
void
pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p)
{
	ASSERT(p != NULL);
	ASSERT(mutex_owned(&p->lock));

	/* Snapshot the final state of the work for post-mortem debugging. */
	p->last_ptr = p->ptr;
	p->last_arg = p->arg;
	p->last_phy = p->phy;
	p->last_xp = p->xp;
	p->last_htag = p->htag;
	p->last_state = p->state;
	p->finish = gethrtime();

	if (p->phy) {
		/* Drop the PHY reference taken in pmcs_gwork(). */
		pmcs_dec_phy_ref_count(p->phy);
	}

	/* Scrub the live fields so the structure is clean for reuse. */
	p->state = PMCS_WORK_STATE_NIL;
	p->htag = PMCS_TAG_FREE;
	p->xp = NULL;
	p->ptr = NULL;
	p->arg = NULL;
	p->phy = NULL;
	p->abt_htag = 0;
	p->timer = 0;
	p->onwire = 0;
	mutex_exit(&p->lock);

	/*
	 * Prefer the real free list, but never block on wfree_lock here:
	 * if it is contended, stash the structure on the pending free
	 * list instead; pmcs_gwork() will splice it back when the free
	 * list runs dry.
	 */
	if (mutex_tryenter(&pwp->wfree_lock) == 0) {
		mutex_enter(&pwp->pfree_lock);
		STAILQ_INSERT_TAIL(&pwp->pf, p, next);
		mutex_exit(&pwp->pfree_lock);
	} else {
		STAILQ_INSERT_TAIL(&pwp->wf, p, next);
		mutex_exit(&pwp->wfree_lock);
	}
}
/*
 * Find a work structure based upon a tag and make sure that the tag
 * serial number matches the work structure we've found.
 * If a structure is found, its lock is held upon return.
 * If lock_phy is B_TRUE, then lock the phy also when returning the work struct
 */
pmcwork_t *
pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag, boolean_t lock_phy)
{
	pmcwork_t *p;
	pmcs_phy_t *phyp;
	/* The tag encodes the structure's index in the work array. */
	uint32_t idx = PMCS_TAG_INDEX(htag);

	p = &pwp->work[idx];

	mutex_enter(&p->lock);
	if (p->htag == htag) {
		if (lock_phy) {
			phyp = p->phy;
			if (phyp != NULL) {
				/* phy lock should be held before work lock */
				mutex_exit(&p->lock);
				mutex_enter(&phyp->phy_lock);
				mutex_enter(&p->lock);
			}
			/*
			 * Check htag again, in case the work got completed
			 * while we dropped the work lock and got the phy lock
			 * (only possible on the phyp != NULL path above).
			 */
			if (p->htag != htag) {
				if (phyp != NULL) {
					mutex_exit(&p->lock);
					mutex_exit(&phyp->phy_lock);
				}
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL, "%s: "
				    "HTAG (0x%x) found, but work (0x%p) "
				    "is already complete", __func__, htag,
				    (void *)p);
				return (NULL);
			}
		}
		/* Success: work lock (and phy lock if requested) held. */
		return (p);
	}
	/* Serial number mismatch: the tag is stale. */
	mutex_exit(&p->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "INDEX 0x%x HTAG 0x%x got p->htag 0x%x", idx, htag, p->htag);
	return (NULL);
}
/*
 * Issue an abort for a command or for all commands.
 *
 * Since this can be called from interrupt context,
 * we don't wait for completion if wait is not set.
 *
 * Called with PHY lock held.
 *
 * Returns 0 on success (or when there is nothing to abort), EBUSY if an
 * ABORT ALL is already in flight for this PHY, ENOMEM/ENODEV on setup
 * failures, ETIMEDOUT if the firmware did not respond, EINVAL on a
 * failed single-command abort.
 */
int
pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds,
    int wait)
{
	pmcwork_t *pwrk;
	pmcs_xscsi_t *tgt;
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	int result, abt_type;
	uint32_t abt_htag, status;

	/* Only one ABORT ALL may be outstanding per PHY at a time. */
	if (pptr->abort_all_start) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "%s: ABORT_ALL for "
		    "(%s) already in progress.", __func__, pptr->path);
		return (EBUSY);
	}

	/* Pick the abort opcode matching the device type. */
	switch (pptr->dtype) {
	case SAS:
		abt_type = PMCIN_SSP_ABORT;
		break;
	case SATA:
		abt_type = PMCIN_SATA_ABORT;
		break;
	case EXPANDER:
		abt_type = PMCIN_SMP_ABORT;
		break;
	default:
		/* Nothing to abort for other device types. */
		return (0);
	}

	pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE,
	    pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->dtype = pptr->dtype;
	pwrk->xp = pptr->target;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	if (wait) {
		/* Completion will copy the outbound IOMB back into msg. */
		pwrk->arg = msg;
	}
	if (pptr->valid_device_id == 0) {
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Invalid DeviceID", __func__);
		return (ENODEV);
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	if (all_cmds) {
		/* Scope flag 1 == abort everything for this device. */
		msg[3] = 0;
		msg[4] = LE_32(1);
		pwrk->ptr = NULL;
		pwrk->abt_htag = PMCS_ABT_HTAG_ALL;
		pptr->abort_all_start = gethrtime();
	} else {
		/* Scope flag 0 == abort the single tagged command. */
		msg[3] = LE_32(tag);
		msg[4] = 0;
		pwrk->abt_htag = tag;
	}
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pptr->abort_all_start = 0;
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 5);
	if (all_cmds) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting all commands for %s device %s. (htag=0x%x)",
		    __func__, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
		    __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	}
	pwrk->state = PMCS_WORK_STATE_ONCHIP;

	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (!wait) {
		/* Fire and forget (e.g. from interrupt context). */
		mutex_exit(&pwrk->lock);
		return (0);
	}

	/* Save the tag before pwrk is recycled by pmcs_pwork(). */
	abt_htag = pwrk->htag;
	/* Drop the PHY lock while waiting so completion can make progress. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	/* Re-read the target: it may have changed while unlocked. */
	tgt = pptr->target;

	if (all_cmds) {
		pptr->abort_all_start = 0;
		cv_signal(&pptr->abort_all_cv);
	}

	if (result) {
		/* WAIT_FOR timed out: firmware never answered. */
		if (all_cmds) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort all request timed out", __func__);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort (htag 0x%08x) request timed out",
			    __func__, abt_htag);
		}
		if (tgt != NULL) {
			mutex_enter(&tgt->statlock);
			if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
			    (tgt->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
				    "%s: Trying DS error recovery for tgt 0x%p",
				    __func__, (void *)tgt);
				(void) pmcs_send_err_recovery_cmd(pwp,
				    PMCS_DEVICE_STATE_IN_RECOVERY, pptr, tgt);
			}
			mutex_exit(&tgt->statlock);
		}
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/*
		 * The only non-success status are IO_NOT_VALID &
		 * IO_ABORT_IN_PROGRESS.
		 * In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's
		 * status is of concern and this duplicate cmd status can
		 * be ignored.
		 * If IO_NOT_VALID, that's not an error per-se.
		 * For abort of single I/O complete the command anyway.
		 * If, however, we were aborting all, that is a problem
		 * as IO_NOT_VALID really means that the IO or device is
		 * not there. So, discovery process will take of the cleanup.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: abort result 0x%x", __func__, LE_32(msg[2]));
		if (all_cmds) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			return (EINVAL);
		}

		return (0);
	}

	if (tgt != NULL) {
		mutex_enter(&tgt->statlock);
		if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
			/* Abort succeeded; bring the device back online. */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
			    __func__, (void *)tgt);
			(void) pmcs_send_err_recovery_cmd(pwp,
			    PMCS_DEVICE_STATE_OPERATIONAL, pptr, tgt);
		}
		mutex_exit(&tgt->statlock);
	}

	return (0);
}
/*
 * Issue a task management function to an SSP device.
 *
 * Called with PHY lock held.
 * statlock CANNOT be held upon entry.
 *
 * tmf is the SAS TMF code; tag is only meaningful for ABORT TASK and
 * QUERY TASK; lun is the 64-bit LUN; on success *response (if non-NULL)
 * receives the raw TMF response code word.
 *
 * Returns 0 on TMF COMPLETE/SUCCEEDED, ENOMEM on resource exhaustion,
 * ETIMEDOUT if no completion arrived, EIO on any other failure.
 */
int
pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag,
    uint64_t lun, uint32_t *response)
{
	int result, ds;
	uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
	sas_ssp_rsp_iu_t *rptr = (void *)local;
	/* Endian-transform vector for the SSP response IU (see
	 * pmcs_endian_transform() for the byte-code format). */
	static const uint8_t ssp_rsp_evec[] = {
		0x58, 0x61, 0x56, 0x72, 0x00
	};
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	struct pmcwork *pwrk;
	pmcs_xscsi_t *xp;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}
	/*
	 * NB: We use the PMCS_OQ_GENERAL outbound queue
	 * NB: so as to not get entangled in normal I/O
	 * NB: processing.
	 */
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_SSP_INI_TM_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	/* The tag-to-manage field is only valid for these two TMFs. */
	if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) {
		msg[3] = LE_32(tag);
	} else {
		msg[3] = 0;
	}
	msg[4] = LE_32(tmf);
	msg[5] = BE_32((uint32_t)lun);
	msg[6] = BE_32((uint32_t)(lun >> 32));
	msg[7] = LE_32(PMCIN_MESSAGE_REPORT);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	/*
	 * NOTE(review): msg[7] (PMCIN_MESSAGE_REPORT) is assigned above but
	 * only 7 dwords (msg[0..6]) are copied here — verify COPY_MESSAGE's
	 * count semantics / whether msg[7] is intentionally dropped.
	 */
	COPY_MESSAGE(ptr, msg, 7);
	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;
	xp = pptr->target;
	pwrk->xp = xp;

	if (xp != NULL) {
		/* Refuse to send a TMF to a non-operational device. */
		mutex_enter(&xp->statlock);
		if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) {
			mutex_exit(&xp->statlock);
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: Not "
			    "sending '%s' because DS is '%s'", __func__,
			    pmcs_tmf2str(tmf), pmcs_status_str
			    (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL));
			return (EIO);
		}
		mutex_exit(&xp->statlock);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
	    "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__,
	    pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	pmcs_unlock_phy(pptr);
	/*
	 * This is a command sent to the target device, so it can take
	 * significant amount of time to complete when path & device is busy.
	 * Set a timeout to 20 seconds
	 */
	WAIT_FOR(pwrk, 20000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	/* Re-read the target: it may have changed while unlocked. */
	xp = pptr->target;

	if (result) {
		/* Timed out waiting for the TMF completion. */
		if (xp == NULL) {
			return (ETIMEDOUT);
		}

		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: status %s for TMF %s action to %s, lun %llu",
		    __func__, pmcs_status_str(status), pmcs_tmf2str(tmf),
		    pptr->path, (unsigned long long) lun);
		/* Map the firmware status to the device state to request. */
		if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
			ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
		} else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) {
			/*
			 * If the status is IN_RECOVERY, it's an indication
			 * that it's now time for us to request to have the
			 * device state set to OPERATIONAL since we're the ones
			 * that requested recovery to begin with.
			 */
			ds = PMCS_DEVICE_STATE_OPERATIONAL;
		} else {
			ds = PMCS_DEVICE_STATE_IN_RECOVERY;
		}
		if (xp != NULL) {
			mutex_enter(&xp->statlock);
			if (xp->dev_state != ds) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
				    "%s: Sending err recovery cmd"
				    " for tgt 0x%p (status = %s)",
				    __func__, (void *)xp,
				    pmcs_status_str(status));
				(void) pmcs_send_err_recovery_cmd(pwp, ds,
				    pptr, xp);
			}
			mutex_exit(&xp->statlock);
		}
		return (EIO);
	} else {
		/* Firmware status OK: make sure the device is OPERATIONAL. */
		ds = PMCS_DEVICE_STATE_OPERATIONAL;
		if (xp != NULL) {
			mutex_enter(&xp->statlock);
			if (xp->dev_state != ds) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
				    "%s: Sending err recovery cmd"
				    " for tgt 0x%p (status = %s)",
				    __func__, (void *)xp,
				    pmcs_status_str(status));
				(void) pmcs_send_err_recovery_cmd(pwp, ds,
				    pptr, xp);
			}
			mutex_exit(&xp->statlock);
		}
	}
	if (LE_32(msg[3]) == 0) {
		/* Response length of zero: no TMF response IU to parse. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "TMF completed with no response");
		return (EIO);
	}
	/* Byte-swap the SSP response IU header into 'local'. */
	pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec);
	xd = (uint8_t *)(&msg[5]);
	xd += SAS_RSP_HDR_SIZE;
	if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF response not RESPONSE DATA (0x%x)",
		    __func__, rptr->datapres);
		return (EIO);
	}
	if (rptr->response_data_length != 4) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Bad SAS RESPONSE DATA LENGTH", msg);
		return (EIO);
	}
	(void) memcpy(&status, xd, sizeof (uint32_t));
	status = BE_32(status);
	if (response != NULL)
		*response = status;
	/*
	 * The status is actually in the low-order byte.  The upper three
	 * bytes contain additional information for the TMFs that support them.
	 * However, at this time we do not issue any of those.  In the other
	 * cases, the upper three bytes are supposed to be 0, but it appears
	 * they aren't always.  Just mask them off.
	 */
	switch (status & 0xff) {
	case SAS_RSP_TMF_COMPLETE:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF complete", __func__);
		result = 0;
		break;
	case SAS_RSP_TMF_SUCCEEDED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF succeeded", __func__);
		result = 0;
		break;
	case SAS_RSP_INVALID_FRAME:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned INVALID FRAME", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_NOT_SUPPORTED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned TMF NOT SUPPORTED", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_FAILED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned TMF FAILED", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_INCORRECT_LUN:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned INCORRECT LUN", __func__);
		result = EIO;
		break;
	case SAS_RSP_OVERLAPPED_OIPTTA:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG "
		    "ATTEMPTED", __func__);
		result = EIO;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned unknown code 0x%x", __func__, status);
		result = EIO;
		break;
	}
	return (result);
}
In the other 5224 * cases, the upper three bytes are supposed to be 0, but it appears 5225 * they aren't always. Just mask them off. 5226 */ 5227 switch (status & 0xff) { 5228 case SAS_RSP_TMF_COMPLETE: 5229 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5230 "%s: TMF complete", __func__); 5231 result = 0; 5232 break; 5233 case SAS_RSP_TMF_SUCCEEDED: 5234 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5235 "%s: TMF succeeded", __func__); 5236 result = 0; 5237 break; 5238 case SAS_RSP_INVALID_FRAME: 5239 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5240 "%s: TMF returned INVALID FRAME", __func__); 5241 result = EIO; 5242 break; 5243 case SAS_RSP_TMF_NOT_SUPPORTED: 5244 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5245 "%s: TMF returned TMF NOT SUPPORTED", __func__); 5246 result = EIO; 5247 break; 5248 case SAS_RSP_TMF_FAILED: 5249 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5250 "%s: TMF returned TMF FAILED", __func__); 5251 result = EIO; 5252 break; 5253 case SAS_RSP_TMF_INCORRECT_LUN: 5254 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5255 "%s: TMF returned INCORRECT LUN", __func__); 5256 result = EIO; 5257 break; 5258 case SAS_RSP_OVERLAPPED_OIPTTA: 5259 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5260 "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG " 5261 "ATTEMPTED", __func__); 5262 result = EIO; 5263 break; 5264 default: 5265 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 5266 "%s: TMF returned unknown code 0x%x", __func__, status); 5267 result = EIO; 5268 break; 5269 } 5270 return (result); 5271 } 5272 5273 /* 5274 * Called with PHY lock held and scratch acquired 5275 */ 5276 int 5277 pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 5278 { 5279 const char *utag_fail_fmt = "%s: untagged NCQ command failure"; 5280 const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)"; 5281 uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status; 5282 uint8_t *fp = pwp->scratch, ds; 5283 fis_t fis; 5284 pmcwork_t *pwrk; 5285 pmcs_xscsi_t *tgt; 5286 5287 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 
5288 if (pwrk == NULL) { 5289 return (ENOMEM); 5290 } 5291 pwrk->htag |= PMCS_TAG_NONIO_CMD; 5292 msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, 5293 PMCIN_SATA_HOST_IO_START)); 5294 msg[1] = LE_32(pwrk->htag); 5295 msg[2] = LE_32(pptr->device_id); 5296 msg[3] = LE_32(512); 5297 msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI); 5298 msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV); 5299 msg[6] = LE_32(0x10); 5300 msg[8] = LE_32(1); 5301 msg[9] = 0; 5302 msg[10] = 0; 5303 msg[11] = 0; 5304 msg[12] = LE_32(DWORD0(pwp->scratch_dma)); 5305 msg[13] = LE_32(DWORD1(pwp->scratch_dma)); 5306 msg[14] = LE_32(512); 5307 msg[15] = 0; 5308 5309 pwrk->arg = msg; 5310 pwrk->dtype = pptr->dtype; 5311 pwrk->xp = pptr->target; 5312 5313 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5314 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5315 if (ptr == NULL) { 5316 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5317 pmcs_pwork(pwp, pwrk); 5318 return (ENOMEM); 5319 } 5320 COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE); 5321 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5322 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5323 5324 pmcs_unlock_phy(pptr); 5325 WAIT_FOR(pwrk, 250, result); 5326 pmcs_pwork(pwp, pwrk); 5327 pmcs_lock_phy(pptr); 5328 5329 tgt = pptr->target; 5330 if (result) { 5331 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, pmcs_timeo, __func__); 5332 return (EIO); 5333 } 5334 status = LE_32(msg[2]); 5335 if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) { 5336 if (tgt == NULL) { 5337 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5338 "%s: cannot find target for phy 0x%p for " 5339 "dev state recovery", __func__, (void *)pptr); 5340 return (EIO); 5341 } 5342 5343 mutex_enter(&tgt->statlock); 5344 5345 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg); 5346 if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) || 5347 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) || 5348 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) { 5349 ds = PMCS_DEVICE_STATE_NON_OPERATIONAL; 5350 } else { 5351 
ds = PMCS_DEVICE_STATE_IN_RECOVERY; 5352 } 5353 if (tgt->dev_state != ds) { 5354 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, "%s: Trying " 5355 "SATA DS Recovery for tgt(0x%p) for status(%s)", 5356 __func__, (void *)tgt, pmcs_status_str(status)); 5357 (void) pmcs_send_err_recovery_cmd(pwp, ds, pptr, tgt); 5358 } 5359 5360 mutex_exit(&tgt->statlock); 5361 return (EIO); 5362 } 5363 fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H; 5364 fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5]; 5365 fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9]; 5366 fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13]; 5367 fis[4] = 0; 5368 if (fp[0] & 0x80) { 5369 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5370 utag_fail_fmt, __func__); 5371 } else { 5372 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, 5373 tag_fail_fmt, __func__, fp[0] & 0x1f); 5374 } 5375 pmcs_fis_dump(pwp, fis); 5376 pptr->need_rl_ext = 0; 5377 return (0); 5378 } 5379 5380 /* 5381 * Transform a structure from CPU to Device endian format, or 5382 * vice versa, based upon a transformation vector. 5383 * 5384 * A transformation vector is an array of bytes, each byte 5385 * of which is defined thusly: 5386 * 5387 * bit 7: from CPU to desired endian, otherwise from desired endian 5388 * to CPU format 5389 * bit 6: Big Endian, else Little Endian 5390 * bits 5-4: 5391 * 00 Undefined 5392 * 01 One Byte quantities 5393 * 02 Two Byte quantities 5394 * 03 Four Byte quantities 5395 * 5396 * bits 3-0: 5397 * 00 Undefined 5398 * Number of quantities to transform 5399 * 5400 * The vector is terminated by a 0 value. 
5401 */ 5402 5403 void 5404 pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in, 5405 const uint8_t *xfvec) 5406 { 5407 uint8_t c, *out = orig_out, *in = orig_in; 5408 5409 if (xfvec == NULL) { 5410 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5411 "%s: null xfvec", __func__); 5412 return; 5413 } 5414 if (out == NULL) { 5415 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5416 "%s: null out", __func__); 5417 return; 5418 } 5419 if (in == NULL) { 5420 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5421 "%s: null in", __func__); 5422 return; 5423 } 5424 while ((c = *xfvec++) != 0) { 5425 int nbyt = (c & 0xf); 5426 int size = (c >> 4) & 0x3; 5427 int bige = (c >> 4) & 0x4; 5428 5429 switch (size) { 5430 case 1: 5431 { 5432 while (nbyt-- > 0) { 5433 *out++ = *in++; 5434 } 5435 break; 5436 } 5437 case 2: 5438 { 5439 uint16_t tmp; 5440 while (nbyt-- > 0) { 5441 (void) memcpy(&tmp, in, sizeof (uint16_t)); 5442 if (bige) { 5443 tmp = BE_16(tmp); 5444 } else { 5445 tmp = LE_16(tmp); 5446 } 5447 (void) memcpy(out, &tmp, sizeof (uint16_t)); 5448 out += sizeof (uint16_t); 5449 in += sizeof (uint16_t); 5450 } 5451 break; 5452 } 5453 case 3: 5454 { 5455 uint32_t tmp; 5456 while (nbyt-- > 0) { 5457 (void) memcpy(&tmp, in, sizeof (uint32_t)); 5458 if (bige) { 5459 tmp = BE_32(tmp); 5460 } else { 5461 tmp = LE_32(tmp); 5462 } 5463 (void) memcpy(out, &tmp, sizeof (uint32_t)); 5464 out += sizeof (uint32_t); 5465 in += sizeof (uint32_t); 5466 } 5467 break; 5468 } 5469 default: 5470 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5471 "%s: bad size", __func__); 5472 return; 5473 } 5474 } 5475 } 5476 5477 const char * 5478 pmcs_get_rate(unsigned int linkrt) 5479 { 5480 const char *rate; 5481 switch (linkrt) { 5482 case SAS_LINK_RATE_1_5GBIT: 5483 rate = "1.5"; 5484 break; 5485 case SAS_LINK_RATE_3GBIT: 5486 rate = "3.0"; 5487 break; 5488 case SAS_LINK_RATE_6GBIT: 5489 rate = "6.0"; 5490 break; 5491 default: 5492 rate = "???"; 5493 break; 5494 } 5495 return (rate); 5496 } 5497 5498 const 
char * 5499 pmcs_get_typename(pmcs_dtype_t type) 5500 { 5501 switch (type) { 5502 case NOTHING: 5503 return ("NIL"); 5504 case SATA: 5505 return ("SATA"); 5506 case SAS: 5507 return ("SSP"); 5508 case EXPANDER: 5509 return ("EXPANDER"); 5510 } 5511 return ("????"); 5512 } 5513 5514 const char * 5515 pmcs_tmf2str(int tmf) 5516 { 5517 switch (tmf) { 5518 case SAS_ABORT_TASK: 5519 return ("Abort Task"); 5520 case SAS_ABORT_TASK_SET: 5521 return ("Abort Task Set"); 5522 case SAS_CLEAR_TASK_SET: 5523 return ("Clear Task Set"); 5524 case SAS_LOGICAL_UNIT_RESET: 5525 return ("Logical Unit Reset"); 5526 case SAS_I_T_NEXUS_RESET: 5527 return ("I_T Nexus Reset"); 5528 case SAS_CLEAR_ACA: 5529 return ("Clear ACA"); 5530 case SAS_QUERY_TASK: 5531 return ("Query Task"); 5532 case SAS_QUERY_TASK_SET: 5533 return ("Query Task Set"); 5534 case SAS_QUERY_UNIT_ATTENTION: 5535 return ("Query Unit Attention"); 5536 default: 5537 return ("Unknown"); 5538 } 5539 } 5540 5541 const char * 5542 pmcs_status_str(uint32_t status) 5543 { 5544 switch (status) { 5545 case PMCOUT_STATUS_OK: 5546 return ("OK"); 5547 case PMCOUT_STATUS_ABORTED: 5548 return ("ABORTED"); 5549 case PMCOUT_STATUS_OVERFLOW: 5550 return ("OVERFLOW"); 5551 case PMCOUT_STATUS_UNDERFLOW: 5552 return ("UNDERFLOW"); 5553 case PMCOUT_STATUS_FAILED: 5554 return ("FAILED"); 5555 case PMCOUT_STATUS_ABORT_RESET: 5556 return ("ABORT_RESET"); 5557 case PMCOUT_STATUS_IO_NOT_VALID: 5558 return ("IO_NOT_VALID"); 5559 case PMCOUT_STATUS_NO_DEVICE: 5560 return ("NO_DEVICE"); 5561 case PMCOUT_STATUS_ILLEGAL_PARAMETER: 5562 return ("ILLEGAL_PARAMETER"); 5563 case PMCOUT_STATUS_LINK_FAILURE: 5564 return ("LINK_FAILURE"); 5565 case PMCOUT_STATUS_PROG_ERROR: 5566 return ("PROG_ERROR"); 5567 case PMCOUT_STATUS_EDC_IN_ERROR: 5568 return ("EDC_IN_ERROR"); 5569 case PMCOUT_STATUS_EDC_OUT_ERROR: 5570 return ("EDC_OUT_ERROR"); 5571 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 5572 return ("ERROR_HW_TIMEOUT"); 5573 case PMCOUT_STATUS_XFER_ERR_BREAK: 5574 
return ("XFER_ERR_BREAK"); 5575 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 5576 return ("XFER_ERR_PHY_NOT_READY"); 5577 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED: 5578 return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED"); 5579 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION: 5580 return ("OPEN_CNX_ERROR_ZONE_VIOLATION"); 5581 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK: 5582 return ("OPEN_CNX_ERROR_BREAK"); 5583 case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 5584 return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS"); 5585 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION: 5586 return ("OPENCNX_ERROR_BAD_DESTINATION"); 5587 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 5588 return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED"); 5589 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 5590 return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY"); 5591 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION: 5592 return ("OPEN_CNX_ERROR_WRONG_DESTINATION"); 5593 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR: 5594 return ("OPEN_CNX_ERROR_UNKNOWN_ERROR"); 5595 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED: 5596 return ("IO_XFER_ERROR_NAK_RECEIVED"); 5597 case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT: 5598 return ("XFER_ERROR_ACK_NAK_TIMEOUT"); 5599 case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED: 5600 return ("XFER_ERROR_PEER_ABORTED"); 5601 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME: 5602 return ("XFER_ERROR_RX_FRAME"); 5603 case PMCOUT_STATUS_IO_XFER_ERROR_DMA: 5604 return ("IO_XFER_ERROR_DMA"); 5605 case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT: 5606 return ("XFER_ERROR_CREDIT_TIMEOUT"); 5607 case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT: 5608 return ("XFER_ERROR_SATA_LINK_TIMEOUT"); 5609 case PMCOUT_STATUS_XFER_ERROR_SATA: 5610 return ("XFER_ERROR_SATA"); 5611 case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE: 5612 return ("XFER_ERROR_REJECTED_NCQ_MODE"); 5613 case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST: 5614 return ("XFER_ERROR_ABORTED_DUE_TO_SRST"); 5615 case 
PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE: 5616 return ("XFER_ERROR_ABORTED_NCQ_MODE"); 5617 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 5618 return ("IO_XFER_OPEN_RETRY_TIMEOUT"); 5619 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR: 5620 return ("SMP_RESP_CONNECTION_ERROR"); 5621 case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE: 5622 return ("XFER_ERROR_UNEXPECTED_PHASE"); 5623 case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN: 5624 return ("XFER_ERROR_RDY_OVERRUN"); 5625 case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED: 5626 return ("XFER_ERROR_RDY_NOT_EXPECTED"); 5627 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: 5628 return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT"); 5629 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK: 5630 return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK"); 5631 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK: 5632 return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK"); 5633 case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH: 5634 return ("XFER_ERROR_OFFSET_MISMATCH"); 5635 case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN: 5636 return ("XFER_ERROR_ZERO_DATA_LEN"); 5637 case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED: 5638 return ("XFER_CMD_FRAME_ISSUED"); 5639 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 5640 return ("ERROR_INTERNAL_SMP_RESOURCE"); 5641 case PMCOUT_STATUS_IO_PORT_IN_RESET: 5642 return ("IO_PORT_IN_RESET"); 5643 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 5644 return ("DEVICE STATE NON-OPERATIONAL"); 5645 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 5646 return ("DEVICE STATE IN RECOVERY"); 5647 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 5648 return ("OPEN CNX ERR HW RESOURCE BUSY"); 5649 default: 5650 return (NULL); 5651 } 5652 } 5653 5654 uint64_t 5655 pmcs_barray2wwn(uint8_t ba[8]) 5656 { 5657 uint64_t result = 0; 5658 int i; 5659 5660 for (i = 0; i < 8; i++) { 5661 result <<= 8; 5662 result |= ba[i]; 5663 } 5664 return (result); 5665 } 5666 5667 void 5668 pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8]) 
5669 { 5670 int i; 5671 for (i = 0; i < 8; i++) { 5672 ba[7 - i] = wwn & 0xff; 5673 wwn >>= 8; 5674 } 5675 } 5676 5677 void 5678 pmcs_report_fwversion(pmcs_hw_t *pwp) 5679 { 5680 const char *fwsupport; 5681 switch (PMCS_FW_TYPE(pwp)) { 5682 case PMCS_FW_TYPE_RELEASED: 5683 fwsupport = "Released"; 5684 break; 5685 case PMCS_FW_TYPE_DEVELOPMENT: 5686 fwsupport = "Development"; 5687 break; 5688 case PMCS_FW_TYPE_ALPHA: 5689 fwsupport = "Alpha"; 5690 break; 5691 case PMCS_FW_TYPE_BETA: 5692 fwsupport = "Beta"; 5693 break; 5694 default: 5695 fwsupport = "Special"; 5696 break; 5697 } 5698 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5699 "Chip Revision: %c; F/W Revision %x.%x.%x %s (ILA rev %08x)", 5700 'A' + pwp->chiprev, PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), 5701 PMCS_FW_MICRO(pwp), fwsupport, pwp->ila_ver); 5702 } 5703 5704 void 5705 pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen) 5706 { 5707 if (pptr->parent) { 5708 pmcs_phy_name(pwp, pptr->parent, obuf, olen); 5709 (void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum); 5710 } else { 5711 (void) snprintf(obuf, olen, "pp%02x", pptr->phynum); 5712 } 5713 } 5714 5715 /* 5716 * This function is called as a sanity check to ensure that a newly registered 5717 * PHY doesn't have a device_id that exists with another registered PHY. 5718 */ 5719 static boolean_t 5720 pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id) 5721 { 5722 pmcs_phy_t *pptr, *pchild; 5723 boolean_t rval; 5724 5725 pptr = parent; 5726 5727 while (pptr) { 5728 if (pptr->valid_device_id && (pptr != phyp) && 5729 (pptr->device_id == device_id)) { 5730 /* 5731 * This can still be OK if both of these PHYs actually 5732 * represent the same device (e.g. expander). It could 5733 * be a case of a new "primary" PHY. If the SAS address 5734 * is the same and they have the same parent, we'll 5735 * accept this if the PHY to be registered is the 5736 * primary. 
5737 */ 5738 if ((phyp->parent == pptr->parent) && 5739 (memcmp(phyp->sas_address, 5740 pptr->sas_address, 8) == 0) && (phyp->width > 1)) { 5741 /* 5742 * Move children over to the new primary and 5743 * update both PHYs 5744 */ 5745 pmcs_lock_phy(pptr); 5746 phyp->children = pptr->children; 5747 pchild = phyp->children; 5748 while (pchild) { 5749 pchild->parent = phyp; 5750 pchild = pchild->sibling; 5751 } 5752 phyp->subsidiary = 0; 5753 phyp->ncphy = pptr->ncphy; 5754 /* 5755 * device_id, valid_device_id, and configured 5756 * will be set by the caller 5757 */ 5758 pptr->children = NULL; 5759 pptr->subsidiary = 1; 5760 pptr->ncphy = 0; 5761 pmcs_unlock_phy(pptr); 5762 pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL, 5763 "%s: Moving device_id %d from PHY %s to %s", 5764 __func__, device_id, pptr->path, 5765 phyp->path); 5766 return (B_TRUE); 5767 } 5768 pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL, 5769 "%s: phy %s already exists as %s with " 5770 "device id 0x%x", __func__, phyp->path, 5771 pptr->path, device_id); 5772 return (B_FALSE); 5773 } 5774 5775 if (pptr->children) { 5776 rval = pmcs_validate_devid(pptr->children, phyp, 5777 device_id); 5778 if (rval == B_FALSE) { 5779 return (rval); 5780 } 5781 } 5782 5783 pptr = pptr->sibling; 5784 } 5785 5786 /* This PHY and device_id are valid */ 5787 return (B_TRUE); 5788 } 5789 5790 /* 5791 * If the PHY is found, it is returned locked 5792 */ 5793 static pmcs_phy_t * 5794 pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn) 5795 { 5796 pmcs_phy_t *matched_phy, *cphyp, *nphyp; 5797 5798 ASSERT(!mutex_owned(&phyp->phy_lock)); 5799 5800 while (phyp) { 5801 pmcs_lock_phy(phyp); 5802 5803 if (phyp->valid_device_id) { 5804 if (memcmp(phyp->sas_address, wwn, 8) == 0) { 5805 return (phyp); 5806 } 5807 } 5808 5809 if (phyp->children) { 5810 cphyp = phyp->children; 5811 pmcs_unlock_phy(phyp); 5812 matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn); 5813 if (matched_phy) { 5814 
ASSERT(mutex_owned(&matched_phy->phy_lock)); 5815 return (matched_phy); 5816 } 5817 pmcs_lock_phy(phyp); 5818 } 5819 5820 /* 5821 * Only iterate through non-root PHYs 5822 */ 5823 if (IS_ROOT_PHY(phyp)) { 5824 pmcs_unlock_phy(phyp); 5825 phyp = NULL; 5826 } else { 5827 nphyp = phyp->sibling; 5828 pmcs_unlock_phy(phyp); 5829 phyp = nphyp; 5830 } 5831 } 5832 5833 return (NULL); 5834 } 5835 5836 pmcs_phy_t * 5837 pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn) 5838 { 5839 uint8_t ebstr[8]; 5840 pmcs_phy_t *pptr, *matched_phy; 5841 5842 pmcs_wwn2barray(wwn, ebstr); 5843 5844 pptr = pwp->root_phys; 5845 while (pptr) { 5846 matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr); 5847 if (matched_phy) { 5848 ASSERT(mutex_owned(&matched_phy->phy_lock)); 5849 return (matched_phy); 5850 } 5851 5852 pptr = pptr->sibling; 5853 } 5854 5855 return (NULL); 5856 } 5857 5858 5859 /* 5860 * pmcs_find_phy_by_sas_address 5861 * 5862 * Find a PHY that both matches "sas_addr" and is on "iport". 5863 * If a matching PHY is found, it is returned locked. 5864 */ 5865 pmcs_phy_t * 5866 pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport, 5867 pmcs_phy_t *root, char *sas_addr) 5868 { 5869 int ua_form = 1; 5870 uint64_t wwn; 5871 char addr[PMCS_MAX_UA_SIZE]; 5872 pmcs_phy_t *pptr, *pnext, *pchild; 5873 5874 if (root == NULL) { 5875 pptr = pwp->root_phys; 5876 } else { 5877 pptr = root; 5878 } 5879 5880 while (pptr) { 5881 pmcs_lock_phy(pptr); 5882 /* 5883 * If the PHY is dead or does not have a valid device ID, 5884 * skip it. 
5885 */ 5886 if ((pptr->dead) || (!pptr->valid_device_id)) { 5887 goto next_phy; 5888 } 5889 5890 if (pptr->iport != iport) { 5891 goto next_phy; 5892 } 5893 5894 wwn = pmcs_barray2wwn(pptr->sas_address); 5895 (void *) scsi_wwn_to_wwnstr(wwn, ua_form, addr); 5896 if (strncmp(addr, sas_addr, strlen(addr)) == 0) { 5897 return (pptr); 5898 } 5899 5900 if (pptr->children) { 5901 pchild = pptr->children; 5902 pmcs_unlock_phy(pptr); 5903 pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild, 5904 sas_addr); 5905 if (pnext) { 5906 return (pnext); 5907 } 5908 pmcs_lock_phy(pptr); 5909 } 5910 5911 next_phy: 5912 pnext = pptr->sibling; 5913 pmcs_unlock_phy(pptr); 5914 pptr = pnext; 5915 } 5916 5917 return (NULL); 5918 } 5919 5920 void 5921 pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis) 5922 { 5923 switch (fis[0] & 0xff) { 5924 case FIS_REG_H2DEV: 5925 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5926 "FIS REGISTER HOST TO DEVICE: " 5927 "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x " 5928 "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 | 5929 BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]), 5930 (unsigned long long) 5931 (((uint64_t)fis[2] & 0x00ffffff) << 24 | 5932 ((uint64_t)fis[1] & 0x00ffffff))); 5933 break; 5934 case FIS_REG_D2H: 5935 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5936 "FIS REGISTER DEVICE TO HOST: Status=0x%02x " 5937 "Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu", 5938 BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]), 5939 (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 | 5940 ((uint64_t)fis[1] & 0x00ffffff))); 5941 break; 5942 default: 5943 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5944 "FIS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5945 fis[0], fis[1], fis[2], fis[3], fis[4]); 5946 break; 5947 } 5948 } 5949 5950 void 5951 pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg) 5952 { 5953 uint32_t *mb = arg; 5954 size_t i; 5955 5956 pmcs_prt(pwp, level, NULL, NULL, msg); 5957 for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i += 
4) { 5958 pmcs_prt(pwp, level, NULL, NULL, 5959 "Offset %2lu: 0x%08x 0x%08x 0x%08x 0x%08x", 5960 i * sizeof (uint32_t), LE_32(mb[i]), 5961 LE_32(mb[i+1]), LE_32(mb[i+2]), LE_32(mb[i+3])); 5962 } 5963 } 5964 5965 /* 5966 * If phyp == NULL we're being called from the worker thread, in which 5967 * case we need to check all the PHYs. In this case, the softstate lock 5968 * will be held. 5969 * If phyp is non-NULL, just issue the spinup release for the specified PHY 5970 * (which will already be locked). 5971 */ 5972 void 5973 pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 5974 { 5975 uint32_t *msg; 5976 struct pmcwork *pwrk; 5977 pmcs_phy_t *tphyp; 5978 5979 if (phyp != NULL) { 5980 ASSERT(mutex_owned(&phyp->phy_lock)); 5981 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5982 "%s: Issuing spinup release only for PHY %s", __func__, 5983 phyp->path); 5984 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5985 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5986 if (msg == NULL || (pwrk = 5987 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 5988 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5989 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 5990 return; 5991 } 5992 5993 phyp->spinup_hold = 0; 5994 bzero(msg, PMCS_QENTRY_SIZE); 5995 pwrk->htag |= PMCS_TAG_NONIO_CMD; 5996 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 5997 PMCIN_LOCAL_PHY_CONTROL)); 5998 msg[1] = LE_32(pwrk->htag); 5999 msg[2] = LE_32((0x10 << 8) | phyp->phynum); 6000 6001 pwrk->dtype = phyp->dtype; 6002 pwrk->state = PMCS_WORK_STATE_ONCHIP; 6003 pwrk->xp = phyp->target; 6004 mutex_exit(&pwrk->lock); 6005 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6006 return; 6007 } 6008 6009 ASSERT(mutex_owned(&pwp->lock)); 6010 6011 tphyp = pwp->root_phys; 6012 while (tphyp) { 6013 pmcs_lock_phy(tphyp); 6014 if (tphyp->spinup_hold == 0) { 6015 pmcs_unlock_phy(tphyp); 6016 tphyp = tphyp->sibling; 6017 continue; 6018 } 6019 6020 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, tphyp, NULL, 6021 "%s: Issuing spinup release for PHY %s", 
__func__, 6022 tphyp->path); 6023 6024 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6025 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6026 if (msg == NULL || (pwrk = 6027 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 6028 pmcs_unlock_phy(tphyp); 6029 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6030 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 6031 break; 6032 } 6033 6034 tphyp->spinup_hold = 0; 6035 bzero(msg, PMCS_QENTRY_SIZE); 6036 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 6037 PMCIN_LOCAL_PHY_CONTROL)); 6038 msg[1] = LE_32(pwrk->htag); 6039 msg[2] = LE_32((0x10 << 8) | tphyp->phynum); 6040 6041 pwrk->dtype = tphyp->dtype; 6042 pwrk->state = PMCS_WORK_STATE_ONCHIP; 6043 pwrk->xp = tphyp->target; 6044 mutex_exit(&pwrk->lock); 6045 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6046 pmcs_unlock_phy(tphyp); 6047 6048 tphyp = tphyp->sibling; 6049 } 6050 } 6051 6052 /* 6053 * Abort commands on dead PHYs and deregister them as well as removing 6054 * the associated targets. 6055 */ 6056 static int 6057 pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 6058 { 6059 pmcs_phy_t *pnext, *pchild; 6060 boolean_t remove_device; 6061 int rval = 0; 6062 6063 while (phyp) { 6064 pmcs_lock_phy(phyp); 6065 pchild = phyp->children; 6066 pnext = phyp->sibling; 6067 pmcs_unlock_phy(phyp); 6068 6069 if (pchild) { 6070 rval = pmcs_kill_devices(pwp, pchild); 6071 if (rval) { 6072 return (rval); 6073 } 6074 } 6075 6076 mutex_enter(&pwp->lock); 6077 pmcs_lock_phy(phyp); 6078 if (phyp->dead && phyp->valid_device_id) { 6079 remove_device = B_TRUE; 6080 } else { 6081 remove_device = B_FALSE; 6082 } 6083 6084 if (remove_device) { 6085 pmcs_remove_device(pwp, phyp); 6086 mutex_exit(&pwp->lock); 6087 6088 rval = pmcs_kill_device(pwp, phyp); 6089 if (rval) { 6090 pmcs_unlock_phy(phyp); 6091 return (rval); 6092 } 6093 } else { 6094 mutex_exit(&pwp->lock); 6095 } 6096 6097 pmcs_unlock_phy(phyp); 6098 phyp = pnext; 6099 } 6100 6101 return (rval); 6102 } 6103 6104 /* 6105 * Called with PHY locked 6106 
 */
int
pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int rval;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "kill %s device @ %s",
	    pmcs_get_typename(pptr->dtype), pptr->path);

	/*
	 * There may be an outstanding ABORT_ALL running, which we wouldn't
	 * know just by checking abort_pending. We can, however, check
	 * abort_all_start. If it's non-zero, there is one, and we'll just
	 * sit here and wait for it to complete. If we don't, we'll remove
	 * the device while there are still commands pending.
	 */
	if (pptr->abort_all_start) {
		while (pptr->abort_all_start) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
			    __func__, (void *)pptr);
			/* cv_wait drops and reacquires phy_lock */
			cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
		}
	} else if (pptr->abort_pending) {
		/* No ABORT_ALL in flight; issue one ourselves */
		rval = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);
		if (rval) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: ABORT_ALL returned non-zero status (%d) for "
			    "PHY 0x%p", __func__, rval, (void *)pptr);
			/* Propagate the abort failure to the caller */
			return (rval);
		}
		pptr->abort_pending = 0;
	}

	if (pptr->valid_device_id) {
		pmcs_deregister_device(pwp, pptr);
	}

	/* Flag the PHY changed and restart discovery to re-evaluate it */
	PHY_CHANGED(pwp, pptr);
	RESTART_DISCOVERY(pwp);
	pptr->valid_device_id = 0;
	return (0);
}

/*
 * Acknowledge the SAS h/w events that need acknowledgement.
 * This is only needed for first level PHYs.
6153 */ 6154 void 6155 pmcs_ack_events(pmcs_hw_t *pwp) 6156 { 6157 uint32_t msg[PMCS_MSG_SIZE], *ptr; 6158 struct pmcwork *pwrk; 6159 pmcs_phy_t *pptr; 6160 6161 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 6162 pmcs_lock_phy(pptr); 6163 if (pptr->hw_event_ack == 0) { 6164 pmcs_unlock_phy(pptr); 6165 continue; 6166 } 6167 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6168 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6169 6170 if ((ptr == NULL) || (pwrk = 6171 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 6172 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6173 pmcs_unlock_phy(pptr); 6174 SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK); 6175 break; 6176 } 6177 6178 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 6179 PMCIN_SAS_HW_EVENT_ACK)); 6180 msg[1] = LE_32(pwrk->htag); 6181 msg[2] = LE_32(pptr->hw_event_ack); 6182 6183 mutex_exit(&pwrk->lock); 6184 pwrk->dtype = pptr->dtype; 6185 pptr->hw_event_ack = 0; 6186 COPY_MESSAGE(ptr, msg, 3); 6187 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6188 pmcs_unlock_phy(pptr); 6189 } 6190 } 6191 6192 /* 6193 * Load DMA 6194 */ 6195 int 6196 pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg) 6197 { 6198 ddi_dma_cookie_t *sg; 6199 pmcs_dmachunk_t *tc; 6200 pmcs_dmasgl_t *sgl, *prior; 6201 int seg, tsc; 6202 uint64_t sgl_addr; 6203 6204 /* 6205 * If we have no data segments, we're done. 6206 */ 6207 if (CMD2PKT(sp)->pkt_numcookies == 0) { 6208 return (0); 6209 } 6210 6211 /* 6212 * Get the S/G list pointer. 6213 */ 6214 sg = CMD2PKT(sp)->pkt_cookies; 6215 6216 /* 6217 * If we only have one dma segment, we can directly address that 6218 * data within the Inbound message itself. 6219 */ 6220 if (CMD2PKT(sp)->pkt_numcookies == 1) { 6221 msg[12] = LE_32(DWORD0(sg->dmac_laddress)); 6222 msg[13] = LE_32(DWORD1(sg->dmac_laddress)); 6223 msg[14] = LE_32(sg->dmac_size); 6224 msg[15] = 0; 6225 return (0); 6226 } 6227 6228 /* 6229 * Otherwise, we'll need one or more external S/G list chunks. 
6230 * Get the first one and its dma address into the Inbound message. 6231 */ 6232 mutex_enter(&pwp->dma_lock); 6233 tc = pwp->dma_freelist; 6234 if (tc == NULL) { 6235 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 6236 mutex_exit(&pwp->dma_lock); 6237 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 6238 "%s: out of SG lists", __func__); 6239 return (-1); 6240 } 6241 pwp->dma_freelist = tc->nxt; 6242 mutex_exit(&pwp->dma_lock); 6243 6244 tc->nxt = NULL; 6245 sp->cmd_clist = tc; 6246 sgl = tc->chunks; 6247 (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ); 6248 sgl_addr = tc->addr; 6249 msg[12] = LE_32(DWORD0(sgl_addr)); 6250 msg[13] = LE_32(DWORD1(sgl_addr)); 6251 msg[14] = 0; 6252 msg[15] = LE_32(PMCS_DMASGL_EXTENSION); 6253 6254 prior = sgl; 6255 tsc = 0; 6256 6257 for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) { 6258 /* 6259 * If the current segment count for this chunk is one less than 6260 * the number s/g lists per chunk and we have more than one seg 6261 * to go, we need another chunk. Get it, and make sure that the 6262 * tail end of the the previous chunk points the new chunk 6263 * (if remembering an offset can be called 'pointing to'). 6264 * 6265 * Note that we can store the offset into our command area that 6266 * represents the new chunk in the length field of the part 6267 * that points the PMC chip at the next chunk- the PMC chip 6268 * ignores this field when the EXTENSION bit is set. 6269 * 6270 * This is required for dma unloads later. 
6271 */ 6272 if (tsc == (PMCS_SGL_NCHUNKS - 1) && 6273 seg < (CMD2PKT(sp)->pkt_numcookies - 1)) { 6274 mutex_enter(&pwp->dma_lock); 6275 tc = pwp->dma_freelist; 6276 if (tc == NULL) { 6277 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 6278 mutex_exit(&pwp->dma_lock); 6279 pmcs_dma_unload(pwp, sp); 6280 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 6281 "%s: out of SG lists", __func__); 6282 return (-1); 6283 } 6284 pwp->dma_freelist = tc->nxt; 6285 tc->nxt = sp->cmd_clist; 6286 mutex_exit(&pwp->dma_lock); 6287 6288 sp->cmd_clist = tc; 6289 (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ); 6290 sgl = tc->chunks; 6291 sgl_addr = tc->addr; 6292 prior[PMCS_SGL_NCHUNKS-1].sglal = 6293 LE_32(DWORD0(sgl_addr)); 6294 prior[PMCS_SGL_NCHUNKS-1].sglah = 6295 LE_32(DWORD1(sgl_addr)); 6296 prior[PMCS_SGL_NCHUNKS-1].sglen = 0; 6297 prior[PMCS_SGL_NCHUNKS-1].flags = 6298 LE_32(PMCS_DMASGL_EXTENSION); 6299 prior = sgl; 6300 tsc = 0; 6301 } 6302 sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress)); 6303 sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress)); 6304 sgl[tsc].sglen = LE_32(sg->dmac_size); 6305 sgl[tsc++].flags = 0; 6306 sg++; 6307 } 6308 return (0); 6309 } 6310 6311 /* 6312 * Unload DMA 6313 */ 6314 void 6315 pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp) 6316 { 6317 pmcs_dmachunk_t *cp; 6318 6319 mutex_enter(&pwp->dma_lock); 6320 while ((cp = sp->cmd_clist) != NULL) { 6321 sp->cmd_clist = cp->nxt; 6322 cp->nxt = pwp->dma_freelist; 6323 pwp->dma_freelist = cp; 6324 } 6325 mutex_exit(&pwp->dma_lock); 6326 } 6327 6328 /* 6329 * Take a chunk of consistent memory that has just been allocated and inserted 6330 * into the cip indices and prepare it for DMA chunk usage and add it to the 6331 * freelist. 
6332 * 6333 * Called with dma_lock locked (except during attach when it's unnecessary) 6334 */ 6335 void 6336 pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp, 6337 pmcs_chunk_t *pchunk, unsigned long lim) 6338 { 6339 unsigned long off, n; 6340 pmcs_dmachunk_t *np = dcp; 6341 pmcs_chunk_t *tmp_chunk; 6342 6343 if (pwp->dma_chunklist == NULL) { 6344 pwp->dma_chunklist = pchunk; 6345 } else { 6346 tmp_chunk = pwp->dma_chunklist; 6347 while (tmp_chunk->next) { 6348 tmp_chunk = tmp_chunk->next; 6349 } 6350 tmp_chunk->next = pchunk; 6351 } 6352 6353 /* 6354 * Install offsets into chunk lists. 6355 */ 6356 for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) { 6357 np->chunks = (void *)&pchunk->addrp[off]; 6358 np->addr = pchunk->dma_addr + off; 6359 np->acc_handle = pchunk->acc_handle; 6360 np->dma_handle = pchunk->dma_handle; 6361 if ((off + PMCS_SGL_CHUNKSZ) < lim) { 6362 np = np->nxt; 6363 } 6364 } 6365 np->nxt = pwp->dma_freelist; 6366 pwp->dma_freelist = dcp; 6367 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 6368 "added %lu DMA chunks ", n); 6369 } 6370 6371 /* 6372 * Change the value of the interrupt coalescing timer. This is done currently 6373 * only for I/O completions. If we're using the "auto clear" feature, it can 6374 * be turned back on when interrupt coalescing is turned off and must be 6375 * turned off when the coalescing timer is on. 6376 * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long 6377 * as that's true, we don't need to distinguish between them. 6378 */ 6379 6380 void 6381 pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj) 6382 { 6383 if (adj == DECREASE_TIMER) { 6384 /* If the timer is already off, nothing to do. 
*/ 6385 if (pwp->io_intr_coal.timer_on == B_FALSE) { 6386 return; 6387 } 6388 6389 pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN; 6390 6391 if (pwp->io_intr_coal.intr_coal_timer == 0) { 6392 /* Disable the timer */ 6393 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0); 6394 6395 if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) { 6396 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, 6397 pwp->odb_auto_clear); 6398 } 6399 6400 pwp->io_intr_coal.timer_on = B_FALSE; 6401 pwp->io_intr_coal.max_io_completions = B_FALSE; 6402 pwp->io_intr_coal.num_intrs = 0; 6403 pwp->io_intr_coal.int_cleared = B_FALSE; 6404 pwp->io_intr_coal.num_io_completions = 0; 6405 6406 DTRACE_PROBE1(pmcs__intr__coalesce__timer__off, 6407 pmcs_io_intr_coal_t *, &pwp->io_intr_coal); 6408 } else { 6409 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER, 6410 pwp->io_intr_coal.intr_coal_timer); 6411 } 6412 } else { 6413 /* 6414 * If the timer isn't on yet, do the setup for it now. 6415 */ 6416 if (pwp->io_intr_coal.timer_on == B_FALSE) { 6417 /* If auto clear is being used, turn it off. 
*/ 6418 if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) { 6419 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, 6420 (pwp->odb_auto_clear & 6421 ~(1 << PMCS_MSIX_IODONE))); 6422 } 6423 6424 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 6425 (1 << PMCS_MSIX_IODONE)); 6426 pwp->io_intr_coal.timer_on = B_TRUE; 6427 pwp->io_intr_coal.intr_coal_timer = 6428 PMCS_COAL_TIMER_GRAN; 6429 6430 DTRACE_PROBE1(pmcs__intr__coalesce__timer__on, 6431 pmcs_io_intr_coal_t *, &pwp->io_intr_coal); 6432 } else { 6433 pwp->io_intr_coal.intr_coal_timer += 6434 PMCS_COAL_TIMER_GRAN; 6435 } 6436 6437 if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) { 6438 pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER; 6439 } 6440 6441 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER, 6442 pwp->io_intr_coal.intr_coal_timer); 6443 } 6444 6445 /* 6446 * Adjust the interrupt threshold based on the current timer value 6447 */ 6448 pwp->io_intr_coal.intr_threshold = 6449 PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 / 6450 (pwp->io_intr_coal.intr_latency + 6451 (pwp->io_intr_coal.intr_coal_timer * 1000))); 6452 } 6453 6454 /* 6455 * Register Access functions 6456 */ 6457 uint32_t 6458 pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum) 6459 { 6460 uint32_t iqci; 6461 6462 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 6463 DDI_SUCCESS) { 6464 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6465 "%s: ddi_dma_sync failed?", __func__); 6466 } 6467 6468 iqci = LE_32( 6469 ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]); 6470 6471 return (iqci); 6472 } 6473 6474 uint32_t 6475 pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum) 6476 { 6477 uint32_t oqpi; 6478 6479 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 6480 DDI_SUCCESS) { 6481 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6482 "%s: ddi_dma_sync failed?", __func__); 6483 } 6484 6485 oqpi = LE_32( 6486 ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]); 6487 6488 return (oqpi); 6489 } 6490 6491 uint32_t 6492 
pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint8_t hi, uint32_t off)
{
    uint32_t rv, newaxil, oldaxil, oldaxih;

    /*
     * GSM space is accessed through a sliding AXI window: the upper bits
     * of "off" select the window base (programmed into PMCS_AXI_TRANS),
     * the low bits index within the window.  axil_lock serializes all
     * users of the window registers.
     */
    newaxil = off & ~GSM_BASE_MASK;
    off &= GSM_BASE_MASK;
    mutex_enter(&pwp->axil_lock);
    oldaxil = ddi_get32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
    ddi_put32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
    drv_usecwait(10);
    /* Read back to confirm the window update actually took effect */
    if (ddi_get32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "AXIL register update failed");
    }
    if (hi) {
        /* Non-zero "hi" also selects the upper AXI translation */
        oldaxih = ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]);
        ddi_put32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], hi);
        drv_usecwait(10);
        if (ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != hi) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                "AXIH register update failed");
        }
    }
    /* The actual GSM read, through the window just programmed */
    rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]);
    /* Restore the previous upper window, if we changed it */
    if (hi) {
        ddi_put32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], oldaxih);
        drv_usecwait(10);
        if (ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != oldaxih) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                "AXIH register restore failed");
        }
    }
    /* Restore the previous lower window */
    ddi_put32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
    drv_usecwait(10);
    if (ddi_get32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "AXIL register restore failed");
    }
    mutex_exit(&pwp->axil_lock);
    return (rv);
}

/*
 * Write a GSM register.  Same AXI windowing scheme as pmcs_rd_gsm_reg,
 * except only the lower translation register is involved.
 */
void
pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    uint32_t newaxil, oldaxil;

    newaxil = off & ~GSM_BASE_MASK;
    off &= GSM_BASE_MASK;
    mutex_enter(&pwp->axil_lock);
    oldaxil = ddi_get32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
    ddi_put32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
    drv_usecwait(10);
    if (ddi_get32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "AXIL register update failed");
    }
    /* The actual GSM write */
    ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val);
    ddi_put32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
    drv_usecwait(10);
    if (ddi_get32(pwp->top_acc_handle,
        &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "AXIL register restore failed");
    }
    mutex_exit(&pwp->axil_lock);
}

/*
 * Read a top-unit register.  A handful of registers actually live in GSM
 * space and are redirected through pmcs_rd_gsm_reg().
 */
uint32_t
pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off)
{
    switch (off) {
    case PMCS_SPC_RESET:
    case PMCS_SPC_BOOT_STRAP:
    case PMCS_SPC_DEVICE_ID:
    case PMCS_DEVICE_REVISION:
        off = pmcs_rd_gsm_reg(pwp, 0, off);
        break;
    default:
        off = ddi_get32(pwp->top_acc_handle,
            &pwp->top_regs[off >> 2]);
        break;
    }
    return (off);
}

/*
 * Write a top-unit register, redirecting the GSM-resident registers.
 */
void
pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    switch (off) {
    case PMCS_SPC_RESET:
    case PMCS_DEVICE_REVISION:
        pmcs_wr_gsm_reg(pwp, off, val);
        break;
    default:
        ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val);
        break;
    }
}

/* Read a message-unit register */
uint32_t
pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off)
{
    return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2]));
}

/* Read from the MPI configuration table */
uint32_t
pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off)
{
    return (ddi_get32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2]));
}

/* Read from the general status table */
uint32_t
pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off)
{
    return (ddi_get32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2]));
}

/* Read from the inbound queue configuration table */
uint32_t
pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
    return (ddi_get32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2]));
}

/* Read from the outbound queue configuration table */
uint32_t
pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
    return (ddi_get32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2]));
}

/* Read the inbound queue producer index register for queue "qnum" */
uint32_t
pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum)
{
    return (ddi_get32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2]));
}

/* Read the outbound queue consumer index register for queue "qnum" */
uint32_t
pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum)
{
    return (ddi_get32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2]));
}

/* Write a message-unit register */
void
pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val);
}

/* Write to the MPI configuration table */
void
pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    ddi_put32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val));
}

/* Write to the general status table */
void
pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    ddi_put32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val);
}

/* Write to the inbound queue configuration table */
void
pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    ddi_put32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val);
}

/* Write to the outbound queue configuration table */
void
pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
    ddi_put32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val);
}

/*
 * Update the inbound queue consumer index, which lives in host DMA
 * memory (pwp->cip), and flush it so the chip sees the update.
 */
void
pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
    ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val;
    if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
        DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: ddi_dma_sync failed?", __func__);
    }
}

/* Write the inbound queue producer index register for queue "qnum" */
void
pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
    ddi_put32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val);
}

/* Write the outbound queue consumer index register for queue "qnum" */
void
pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
    ddi_put32(pwp->mpi_acc_handle,
        &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val);
}

/*
 * Update the outbound queue producer index, which lives in host DMA
 * memory (pwp->cip), and flush it so the chip sees the update.
 */
void
pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
    ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val;
    if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
        DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: ddi_dma_sync failed?", __func__);
    }
}

/*
 * Check the status value of an outbound IOMB and report anything bad
 */

void
pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb)
{
    uint16_t opcode;
    int offset;        /* word index of the status field, per opcode */

    if (iomb == NULL) {
        return;
    }

    opcode = LE_32(iomb[0]) & 0xfff;

    switch (opcode) {
    /*
     * The following have no status field, so ignore them
     */
    case PMCOUT_ECHO:
    case PMCOUT_SAS_HW_EVENT:
    case PMCOUT_GET_DEVICE_HANDLE:
    case PMCOUT_SATA_EVENT:
    case PMCOUT_SSP_EVENT:
    case PMCOUT_DEVICE_HANDLE_ARRIVED:
    case PMCOUT_GPIO:
    case PMCOUT_GPIO_EVENT:
    case PMCOUT_GET_TIME_STAMP:
    case PMCOUT_SKIP_ENTRIES:
    case PMCOUT_GET_NVMD_DATA:    /* Actually lower 16 bits of word 3 */
    case PMCOUT_SET_NVMD_DATA:    /* but ignore - we don't use these */
    case PMCOUT_DEVICE_HANDLE_REMOVED:
    case PMCOUT_SSP_REQUEST_RECEIVED:
        return;

    case PMCOUT_GENERAL_EVENT:
        offset = 1;
        break;

    case PMCOUT_SSP_COMPLETION:
    case PMCOUT_SMP_COMPLETION:
    case PMCOUT_DEVICE_REGISTRATION:
    case PMCOUT_DEREGISTER_DEVICE_HANDLE:
    case PMCOUT_SATA_COMPLETION:
    case PMCOUT_DEVICE_INFO:
    case PMCOUT_FW_FLASH_UPDATE:
    case PMCOUT_SSP_ABORT:
    case PMCOUT_SATA_ABORT:
    case PMCOUT_SAS_DIAG_MODE_START_END:
    case PMCOUT_SAS_HW_EVENT_ACK_ACK:
    case PMCOUT_SMP_ABORT:
    case PMCOUT_SET_DEVICE_STATE:
    case PMCOUT_GET_DEVICE_STATE:
    case PMCOUT_SET_DEVICE_INFO:
        offset = 2;
        break;

    case PMCOUT_LOCAL_PHY_CONTROL:
    case PMCOUT_SAS_DIAG_EXECUTE:
    case PMCOUT_PORT_CONTROL:
        offset = 3;
        break;

    case PMCOUT_GET_INFO:
    case PMCOUT_GET_VPD:
    case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
    case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
    case PMCOUT_SET_VPD:
    case PMCOUT_TWI:
        pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
            "Got response for deprecated opcode", iomb);
        return;

    default:
        pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
            "Got response for unknown opcode", iomb);
        return;
    }

    if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) {
        pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
            "bad status on TAG_TYPE_NONE command", iomb);
    }
}

/*
 * Reset a target's soft state after its device has gone away, and flush
 * all of its queues.  Called with statlock held.
 */
void
pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
    _NOTE(ARGUNUSED(pwp));

    ASSERT(mutex_owned(&xp->statlock));

    pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: Device 0x%p is gone.",
        __func__, (void *)xp);

    /*
     * Clear the dip now.  This keeps pmcs_remove_device from attempting
     * to call us on the same device while we're still flushing queues.
     * The only side effect is we can no longer update SM-HBA properties,
     * but this device is going away anyway, so no matter.
     */
    xp->dip = NULL;
    xp->smpd = NULL;
    xp->special_running = 0;
    xp->recovering = 0;
    xp->recover_wait = 0;
    xp->draining = 0;
    xp->new = 0;
    xp->assigned = 0;
    xp->dev_state = 0;
    xp->tagmap = 0;
    xp->dev_gone = 1;
    xp->event_recovery = 0;
    xp->dtype = NOTHING;
    xp->wq_recovery_tail = NULL;
    /* Don't clear xp->phy */
    /* Don't clear xp->actv_cnt */
    /* Don't clear xp->actv_pkts */

    /*
     * Flush all target queues
     */
    pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
}

/*
 * Log a human-readable description of an SMP DISCOVER response function
 * result code.  Returns the raw result code unchanged.
 */
static int
pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
{
    int result = srf->srf_result;

    switch (result) {
    case SMP_RES_UNKNOWN_FUNCTION:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: Unknown SMP Function(0x%x)",
            __func__, result);
        break;
    case SMP_RES_FUNCTION_FAILED:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: SMP Function Failed(0x%x)",
            __func__, result);
        break;
    case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: Invalid Request Frame Length(0x%x)",
            __func__, result);
        break;
    case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: Incomplete Descriptor List(0x%x)",
            __func__, result);
        break;
    case SMP_RES_PHY_DOES_NOT_EXIST:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: PHY does not exist(0x%x)",
            __func__, result);
        break;
    case SMP_RES_PHY_VACANT:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: PHY Vacant(0x%x)",
            __func__, result);
        break;

    default:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: SMP DISCOVER Response "
            "Function Result: (0x%x)",
            __func__, result);
        break;
    }

    return (result);
}

/*
 * Do all the repetitive stuff necessary to setup for DMA
 *
 * pwp: Used for dip
 * dma_attr: ddi_dma_attr_t to use for the mapping
 * acch: ddi_acc_handle_t to use for the mapping
 * dmah: ddi_dma_handle_t to use
 * length: Amount of memory for mapping
 * kvap: Pointer filled in with kernel virtual address on successful return
 * dma_addr: Pointer filled in with DMA address on successful return
 */
boolean_t
pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
    ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
{
    dev_info_t *dip = pwp->dip;
    ddi_dma_cookie_t cookie;
    size_t real_length;
    uint_t ddma_flag = DDI_DMA_CONSISTENT;
    uint_t ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
    uint_t cookie_cnt;
    ddi_device_acc_attr_t mattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC,
        DDI_DEFAULT_ACC
    };

    /* NULL the out-handles so failure paths leave a consistent state */
    *acch = NULL;
    *dmah = NULL;

    if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
        DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "Failed to allocate DMA handle");
        return (B_FALSE);
    }

    if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
        NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "Failed to allocate DMA mem");
        ddi_dma_free_handle(dmah);
        *dmah = NULL;
        return (B_FALSE);
    }

    if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
        ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
        != DDI_DMA_MAPPED) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
        ddi_dma_free_handle(dmah);
        ddi_dma_mem_free(acch);
        *dmah = NULL;
        *acch = NULL;
        return (B_FALSE);
    }

    /* We require a single physically contiguous cookie */
    if (cookie_cnt != 1) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Multiple cookies");
        if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Condition "
                "failed at %s():%d", __func__, __LINE__);
        }
        ddi_dma_free_handle(dmah);
        ddi_dma_mem_free(acch);
        *dmah = NULL;
        *acch = NULL;
        return (B_FALSE);
    }

    *dma_addr = cookie.dmac_laddress;

    return (B_TRUE);
}

/*
 * Flush requested queues for a particular target.  Called with statlock held
 */
void
pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
{
    pmcs_cmd_t *sp, *sp_next;
    pmcwork_t *pwrk;

    ASSERT(pwp != NULL);
    ASSERT(tgt != NULL);

    pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, tgt,
        "%s: Flushing queues (%d) for target 0x%p", __func__,
        queues, (void *)tgt);

    /*
     * Commands on the wait queue (or the special queue below) don't have
     * work structures associated with them.
     */
    if (queues & PMCS_TGT_WAIT_QUEUE) {
        mutex_enter(&tgt->wqlock);
        while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
            STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
            pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, tgt,
                "%s: Removing cmd 0x%p from wq for target 0x%p",
                __func__, (void *)sp, (void *)tgt);
            CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
            CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
            /*
             * wqlock is dropped while completing this command to
             * the completion queue; the loop re-fetches the head
             * after reacquiring it.
             */
            mutex_exit(&tgt->wqlock);
            pmcs_dma_unload(pwp, sp);
            mutex_enter(&pwp->cq_lock);
            STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
            PMCS_CQ_RUN_LOCKED(pwp);
            mutex_exit(&pwp->cq_lock);
            mutex_enter(&tgt->wqlock);
        }
        mutex_exit(&tgt->wqlock);
    }

    /*
     * Commands on the active queue will have work structures associated
     * with them.
     */
    if (queues & PMCS_TGT_ACTIVE_QUEUE) {
        /* statlock must not be held while taking aqlock */
        mutex_exit(&tgt->statlock);
        mutex_enter(&tgt->aqlock);
        sp = STAILQ_FIRST(&tgt->aq);
        while (sp) {
            sp_next = STAILQ_NEXT(sp, cmd_next);
            pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);

            /*
             * If we don't find a work structure, it's because
             * the command is already complete. If so, move on
             * to the next one.
             */
            if (pwrk == NULL) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
                    "%s: Not removing cmd 0x%p (htag 0x%x) "
                    "from aq", __func__, (void *)sp,
                    sp->cmd_tag);
                sp = sp_next;
                continue;
            }

            STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
            pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
                "%s: Removing cmd 0x%p (htag 0x%x) from aq for "
                "target 0x%p", __func__, (void *)sp, sp->cmd_tag,
                (void *)tgt);
            mutex_exit(&tgt->aqlock);

            /*
             * Mark the work structure as dead and complete it
             */
            pwrk->dead = 1;
            CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
            CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
            pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
            pmcs_dma_unload(pwp, sp);
            mutex_enter(&pwp->cq_lock);
            STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
            mutex_exit(&pwp->cq_lock);
            mutex_enter(&tgt->aqlock);
            sp = sp_next;
        }
        mutex_exit(&tgt->aqlock);
        mutex_enter(&tgt->statlock);
    }

    if (queues & PMCS_TGT_SPECIAL_QUEUE) {
        while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
            STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
            pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
                "%s: Removing cmd 0x%p from sq for target 0x%p",
                __func__, (void *)sp, (void *)tgt);
            CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
            CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
            pmcs_dma_unload(pwp, sp);
            mutex_enter(&pwp->cq_lock);
            STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
            mutex_exit(&pwp->cq_lock);
        }
    }

    if (queues ==
PMCS_TGT_ALL_QUEUES) { 7089 mutex_exit(&tgt->statlock); 7090 pmcs_flush_nonio_cmds(pwp, tgt); 7091 mutex_enter(&tgt->statlock); 7092 } 7093 } 7094 7095 /* 7096 * Flush non-IO commands for this target. This cleans up the off-queue 7097 * work with no pmcs_cmd_t associated. 7098 */ 7099 static void 7100 pmcs_flush_nonio_cmds(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 7101 { 7102 int i; 7103 pmcwork_t *p; 7104 7105 for (i = 0; i < pwp->max_cmd; i++) { 7106 p = &pwp->work[i]; 7107 mutex_enter(&p->lock); 7108 if (p->xp != tgt) { 7109 mutex_exit(&p->lock); 7110 continue; 7111 } 7112 if (p->htag & PMCS_TAG_NONIO_CMD) { 7113 if (!PMCS_COMMAND_ACTIVE(p) || PMCS_COMMAND_DONE(p)) { 7114 mutex_exit(&p->lock); 7115 continue; 7116 } 7117 pmcs_prt(pwp, PMCS_PRT_DEBUG, p->phy, p->xp, 7118 "%s: Completing non-io cmd with HTAG 0x%x", 7119 __func__, p->htag); 7120 pmcs_complete_work_impl(pwp, p, NULL, 0); 7121 } else { 7122 mutex_exit(&p->lock); 7123 } 7124 } 7125 } 7126 7127 void 7128 pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb, 7129 size_t amt) 7130 { 7131 pmcs_phy_t *pptr = NULL; 7132 7133 switch (PMCS_TAG_TYPE(pwrk->htag)) { 7134 case PMCS_TAG_TYPE_CBACK: 7135 { 7136 pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr; 7137 (*callback)(pwp, pwrk, iomb); 7138 break; 7139 } 7140 case PMCS_TAG_TYPE_WAIT: 7141 if (pwrk->arg && iomb && amt) { 7142 (void) memcpy(pwrk->arg, iomb, amt); 7143 } 7144 cv_signal(&pwrk->sleep_cv); 7145 mutex_exit(&pwrk->lock); 7146 break; 7147 case PMCS_TAG_TYPE_NONE: 7148 #ifdef DEBUG 7149 pmcs_check_iomb_status(pwp, iomb); 7150 #endif 7151 pptr = pwrk->phy; 7152 pmcs_pwork(pwp, pwrk); 7153 7154 /* If this was an abort all, clean up if needed */ 7155 if ((pwrk->abt_htag == PMCS_ABT_HTAG_ALL) && (pptr != NULL)) { 7156 mutex_enter(&pptr->phy_lock); 7157 if (pptr->abort_all_start) { 7158 pptr->abort_all_start = 0; 7159 cv_signal(&pptr->abort_all_cv); 7160 } 7161 mutex_exit(&pptr->phy_lock); 7162 } 7163 break; 7164 default: 7165 /* 7166 * We will leak 
a structure here if we don't know 7167 * what happened 7168 */ 7169 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 7170 "%s: Unknown PMCS_TAG_TYPE (%x)", 7171 __func__, PMCS_TAG_TYPE(pwrk->htag)); 7172 break; 7173 } 7174 } 7175 7176 /* 7177 * Determine if iport still has targets. During detach(9E), if SCSA is 7178 * successfull in its guarantee of tran_tgt_free(9E) before detach(9E), 7179 * this should always return B_FALSE. 7180 */ 7181 boolean_t 7182 pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport) 7183 { 7184 pmcs_xscsi_t *xp; 7185 int i; 7186 7187 mutex_enter(&pwp->lock); 7188 7189 if (!pwp->targets || !pwp->max_dev) { 7190 mutex_exit(&pwp->lock); 7191 return (B_FALSE); 7192 } 7193 7194 for (i = 0; i < pwp->max_dev; i++) { 7195 xp = pwp->targets[i]; 7196 if ((xp == NULL) || (xp->phy == NULL) || 7197 (xp->phy->iport != iport)) { 7198 continue; 7199 } 7200 7201 mutex_exit(&pwp->lock); 7202 return (B_TRUE); 7203 } 7204 7205 mutex_exit(&pwp->lock); 7206 return (B_FALSE); 7207 } 7208 7209 /* 7210 * Called with softstate lock held 7211 */ 7212 void 7213 pmcs_destroy_target(pmcs_xscsi_t *target) 7214 { 7215 pmcs_hw_t *pwp = target->pwp; 7216 pmcs_iport_t *iport; 7217 7218 ASSERT(pwp); 7219 ASSERT(mutex_owned(&pwp->lock)); 7220 7221 if (!target->ua) { 7222 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target, 7223 "%s: target %p iport address is null", 7224 __func__, (void *)target); 7225 } 7226 7227 iport = pmcs_get_iport_by_ua(pwp, target->ua); 7228 if (iport == NULL) { 7229 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target, 7230 "%s: no iport associated with tgt(0x%p)", 7231 __func__, (void *)target); 7232 return; 7233 } 7234 7235 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, 7236 "%s: free target %p", __func__, (void *)target); 7237 if (target->ua) { 7238 strfree(target->ua); 7239 } 7240 7241 mutex_destroy(&target->wqlock); 7242 mutex_destroy(&target->aqlock); 7243 mutex_destroy(&target->statlock); 7244 cv_destroy(&target->reset_cv); 7245 cv_destroy(&target->abort_cv); 
    ddi_soft_state_bystr_fini(&target->lun_sstate);
    ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
    pmcs_rele_iport(iport);
}

/*
 * pmcs_lock_phy_impl
 *
 * This function is what does the actual work for pmcs_lock_phy. It will
 * lock all PHYs from phyp down in a top-down fashion.
 *
 * Locking notes:
 * 1. level starts from 0 for the PHY ("parent") that's passed in. It is
 * not a reflection of the actual level of the PHY in the SAS topology.
 * 2. If parent is an expander, then parent is locked along with all its
 * descendents.
 * 3. Expander subsidiary PHYs at level 0 are not locked. It is the
 * responsibility of the caller to individually lock expander subsidiary PHYs
 * at level 0 if necessary.
 * 4. Siblings at level 0 are not traversed due to the possibility that we're
 * locking a PHY on the dead list. The siblings could be pointing to invalid
 * PHYs. We don't lock siblings at level 0 anyway.
 */
static void
pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
{
    pmcs_phy_t *tphyp;

    ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
        (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

    /*
     * Start walking the PHYs.
     */
    tphyp = phyp;
    while (tphyp) {
        /*
         * If we're at the top level, only lock ourselves. For anything
         * at level > 0, traverse children while locking everything.
         */
        if ((level > 0) || (tphyp == phyp)) {
            pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, tphyp,
                NULL, "%s: PHY 0x%p parent 0x%p path %s lvl %d",
                __func__, (void *)tphyp, (void *)tphyp->parent,
                tphyp->path, level);
            mutex_enter(&tphyp->phy_lock);

            /* Recurse into children (top-down acquisition order) */
            if (tphyp->children) {
                pmcs_lock_phy_impl(tphyp->children, level + 1);
            }
        }

        /* See locking note 4: no sibling traversal at level 0 */
        if (level == 0) {
            return;
        }

        tphyp = tphyp->sibling;
    }
}

/*
 * pmcs_lock_phy
 *
 * This function is responsible for locking a PHY and all its descendents
 */
void
pmcs_lock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
    char *callername = NULL;
    ulong_t off;

    ASSERT(phyp != NULL);

    /* In DEBUG builds, log the caller's symbol for lock tracing */
    callername = modgetsymname((uintptr_t)caller(), &off);

    if (callername == NULL) {
        pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
            "%s: PHY 0x%p path %s caller: unknown", __func__,
            (void *)phyp, phyp->path);
    } else {
        pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
            "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
            (void *)phyp, phyp->path, callername, off);
    }
#else
    pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
        "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
#endif
    pmcs_lock_phy_impl(phyp, 0);
}

/*
 * pmcs_unlock_phy_impl
 *
 * Unlock all PHYs from phyp down in a bottom-up fashion.
 */
static void
pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
{
    pmcs_phy_t *phy_next;

    ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
        (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

    /*
     * Recurse down to the bottom PHYs
     */
    if (level == 0) {
        if (phyp->children) {
            pmcs_unlock_phy_impl(phyp->children, level + 1);
        }
    } else {
        phy_next = phyp;
        while (phy_next) {
            if (phy_next->children) {
                pmcs_unlock_phy_impl(phy_next->children,
                    level + 1);
            }
            phy_next = phy_next->sibling;
        }
    }

    /*
     * Iterate through PHYs unlocking all at level > 0 as well the top PHY
     */
    phy_next = phyp;
    while (phy_next) {
        if ((level > 0) || (phy_next == phyp)) {
            pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
                phy_next, NULL,
                "%s: PHY 0x%p parent 0x%p path %s lvl %d",
                __func__, (void *)phy_next,
                (void *)phy_next->parent, phy_next->path, level);
            mutex_exit(&phy_next->phy_lock);
        }

        /* Mirror of pmcs_lock_phy_impl: no sibling walk at level 0 */
        if (level == 0) {
            return;
        }

        phy_next = phy_next->sibling;
    }
}

/*
 * pmcs_unlock_phy
 *
 * Unlock a PHY and all its descendents
 */
void
pmcs_unlock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
    char *callername = NULL;
    ulong_t off;

    ASSERT(phyp != NULL);

    /* In DEBUG builds, log the caller's symbol for lock tracing */
    callername = modgetsymname((uintptr_t)caller(), &off);

    if (callername == NULL) {
        pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
            "%s: PHY 0x%p path %s caller: unknown", __func__,
            (void *)phyp, phyp->path);
    } else {
        pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
            "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
            (void *)phyp, phyp->path, callername, off);
    }
#else
    pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
        "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
#endif
    pmcs_unlock_phy_impl(phyp, 0);
}

/*
 * pmcs_get_root_phy
 *
 * For a given phy pointer return its root phy.
 * This function must only be called during discovery in order to ensure that
 * the chain of PHYs from phyp up to the root PHY doesn't change.
 */
pmcs_phy_t *
pmcs_get_root_phy(pmcs_phy_t *phyp)
{
    ASSERT(phyp);

    while (phyp) {
        if (IS_ROOT_PHY(phyp)) {
            break;
        }
        phyp = phyp->parent;
    }

    return (phyp);
}

/*
 * pmcs_free_dma_chunklist
 *
 * Free DMA S/G chunk list
 */
void
pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
{
    pmcs_chunk_t *pchunk;

    while (pwp->dma_chunklist) {
        pchunk = pwp->dma_chunklist;
        pwp->dma_chunklist = pwp->dma_chunklist->next;
        if (pchunk->dma_handle) {
            if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
                DDI_SUCCESS) {
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                    "Condition failed at %s():%d",
                    __func__, __LINE__);
            }
            ddi_dma_free_handle(&pchunk->dma_handle);
            ddi_dma_mem_free(&pchunk->acc_handle);
        }
        kmem_free(pchunk, sizeof (pmcs_chunk_t));
    }
}

/*
 * kmem cache constructor for PHY structures: initialize the embedded
 * lock and CV once per cached object.
 */
/*ARGSUSED2*/
int
pmcs_phy_constructor(void *buf, void *arg, int kmflags)
{
    pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
    pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

    mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(pwp->intr_pri));
    cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
    return (0);
}

/* kmem cache destructor: tear down what the constructor set up */
/*ARGSUSED1*/
void
pmcs_phy_destructor(void *buf, void *arg)
{
    pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

    cv_destroy(&phyp->abort_all_cv);
    mutex_destroy(&phyp->phy_lock);
}

/*
 * Free all PHYs from the kmem_cache starting at phyp as well as everything
 * on the dead_phys list.
 *
 * NOTE: This function does not free root PHYs as they are not allocated
 * from the kmem_cache.
 *
 * No PHY locks are acquired as this should only be called during DDI_DETACH
 * or soft reset (while pmcs interrupts are disabled).
 */
void
pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
    pmcs_phy_t *tphyp, *nphyp, *cphyp;

    if (phyp == NULL) {
        return;
    }

    /* Depth-first: free each subtree before its siblings */
    for (tphyp = phyp; tphyp; tphyp = nphyp) {
        nphyp = tphyp->sibling;
        cphyp = tphyp->children;

        if (cphyp) {
            tphyp->children = NULL;
            pmcs_free_all_phys(pwp, cphyp);
        }

        if (!IS_ROOT_PHY(tphyp)) {
            tphyp->target_addr = NULL;
            kmem_cache_free(pwp->phy_cache, tphyp);
        }
    }

    /* Now drain the dead PHY list as well */
    mutex_enter(&pwp->dead_phylist_lock);
    for (tphyp = pwp->dead_phys; tphyp; tphyp = nphyp) {
        nphyp = tphyp->dead_next;
        tphyp->target_addr = NULL;
        kmem_cache_free(pwp->phy_cache, tphyp);
    }
    pwp->dead_phys = NULL;
    mutex_exit(&pwp->dead_phylist_lock);
}

/*
 * Free a list of PHYs linked together by the sibling pointer back to the
 * kmem cache from whence they came. This function does not recurse, so the
 * caller must ensure there are no children.
 */
void
pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
    pmcs_phy_t *next_phy;

    while (phyp) {
        next_phy = phyp->sibling;
        ASSERT(!mutex_owned(&phyp->phy_lock));
        phyp->target_addr = NULL;
        kmem_cache_free(pwp->phy_cache, phyp);
        phyp = next_phy;
    }
}

/*
 * Make a copy of an existing PHY structure. This is used primarily in
 * discovery to compare the contents of an existing PHY with what gets
 * reported back by an expander.
 *
 * This function must not be called from any context where sleeping is
 * not possible.
 *
 * The new PHY is returned unlocked.
 */
static pmcs_phy_t *
pmcs_clone_phy(pmcs_phy_t *orig_phy)
{
    pmcs_phy_t *local;

    local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);

    /*
     * Go ahead and just copy everything...
     */
    *local = *orig_phy;
    local->target_addr = &orig_phy->target;

    /*
     * But the following must be set appropriately for this copy
     */
    local->sibling = NULL;
    local->children = NULL;
    local->target = NULL;
    /* The structure copy clobbered the cached lock; re-init it */
    mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(orig_phy->pwp->intr_pri));

    return (local);
}

/* FMA: return the fault status of a register access handle */
int
pmcs_check_acc_handle(ddi_acc_handle_t handle)
{
    ddi_fm_error_t de;

    if (handle == NULL) {
        return (DDI_FAILURE);
    }
    ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
    return (de.fme_status);
}

/* FMA: return the fault status of a DMA handle */
int
pmcs_check_dma_handle(ddi_dma_handle_t handle)
{
    ddi_fm_error_t de;

    if (handle == NULL) {
        return (DDI_FAILURE);
    }
    ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
    return (de.fme_status);
}


/* Post an FMA ereport of class DDI_FM_DEVICE.<detail> if capable */
void
pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
{
    uint64_t ena;
    char buf[FM_MAX_CLASS];

    (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
    ena = fm_ena_generate(0, FM_ENA_FMT1);
    if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
        ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
    }
}

/*
 * Sweep every access and DMA handle the driver allocated in attach and
 * report whether any has faulted.  Returns 0 if all are healthy, 1 if
 * any check failed.
 */
int
pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
{
    pmcs_chunk_t *pchunk;
    int i;

    /* check all acc & dma handles allocated in attach */
    if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
        goto check_failed;
    }

    for (i = 0; i < PMCS_NIQ; i++) {
        if ((pmcs_check_dma_handle(
            pwp->iqp_handles[i]) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(
            pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
            goto check_failed;
        }
    }

    for (i = 0; i < PMCS_NOQ; i++) {
        if ((pmcs_check_dma_handle(
            pwp->oqp_handles[i]) != DDI_SUCCESS) ||
            (pmcs_check_acc_handle(
            pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
            goto check_failed;
        }
    }

    if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
        goto check_failed;
    }

    /* Firmware log buffer is optional */
    if (pwp->fwlog &&
        ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
        goto check_failed;
    }

    /* Register dump area is optional */
    if (pwp->regdump_hndl && pwp->regdump_acchdl &&
        ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
        (pmcs_check_acc_handle(pwp->regdump_acchdl)
        != DDI_SUCCESS))) {
        goto check_failed;
    }


    pchunk = pwp->dma_chunklist;
    while (pchunk) {
        if ((pmcs_check_acc_handle(pchunk->acc_handle)
            != DDI_SUCCESS) ||
            (pmcs_check_dma_handle(pchunk->dma_handle)
            != DDI_SUCCESS)) {
            goto check_failed;
        }
        pchunk = pchunk->next;
    }

    return (0);

check_failed:

    return (1);
}

/*
 * pmcs_handle_dead_phys
 *
 * If the PHY has no outstanding work associated with it, remove it from
 * the dead PHY list and free it.
 *
 * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
 * This keeps routines that need to submit work to the chip from having to
 * hold PHY locks to ensure that PHYs don't disappear while they do their work.
 */
void
pmcs_handle_dead_phys(pmcs_hw_t *pwp)
{
    pmcs_phy_t *phyp, *nphyp, *pphyp;

    /* Lock order: pwp->lock, then config_lock, then dead_phylist_lock */
    mutex_enter(&pwp->lock);
    mutex_enter(&pwp->config_lock);

    if (pwp->configuring | pwp->ds_err_recovering) {
        mutex_exit(&pwp->config_lock);
        mutex_exit(&pwp->lock);
        return;
    }

    /*
     * Check every PHY in the dead PHY list
     */
    mutex_enter(&pwp->dead_phylist_lock);
    phyp = pwp->dead_phys;
    pphyp = NULL; /* Set previous PHY to NULL */

    while (phyp != NULL) {
        pmcs_lock_phy(phyp);
        ASSERT(phyp->dead);

        nphyp = phyp->dead_next;

        /*
         * Check for outstanding work
         */
        if (phyp->ref_count > 0) {
            pmcs_unlock_phy(phyp);
            pphyp = phyp; /* This PHY becomes "previous" */
        } else if (phyp->target) {
            pmcs_unlock_phy(phyp);
            pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, phyp->target,
                "%s: Not freeing PHY 0x%p: target 0x%p is not free",
                __func__, (void *)phyp, (void *)phyp->target);
            pphyp = phyp;
        } else {
            /*
             * No outstanding work or target references. Remove it
             * from the list and free it
             */
            pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
                "%s: Freeing inactive dead PHY 0x%p @ %s "
                "target = 0x%p", __func__, (void *)phyp,
                phyp->path, (void *)phyp->target);
            /*
             * If pphyp is NULL, then phyp was the head of the list,
             * so just reset the head to nphyp. Otherwise, the
             * previous PHY will now point to nphyp (the next PHY)
             */
            if (pphyp == NULL) {
                pwp->dead_phys = nphyp;
            } else {
                pphyp->dead_next = nphyp;
            }
            /*
             * If the target still points to this PHY, remove
             * that linkage now.
             */
            if (phyp->target) {
                mutex_enter(&phyp->target->statlock);
                if (phyp->target->phy == phyp) {
                    phyp->target->phy = NULL;
                }
                mutex_exit(&phyp->target->statlock);
            }
            pmcs_unlock_phy(phyp);
            phyp->target_addr = NULL;
            kmem_cache_free(pwp->phy_cache, phyp);
        }

        phyp = nphyp;
    }

    mutex_exit(&pwp->dead_phylist_lock);
    mutex_exit(&pwp->config_lock);
    mutex_exit(&pwp->lock);
}

/* Atomically bump a PHY's reference count */
void
pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
{
    atomic_inc_32(&phyp->ref_count);
}

/* Atomically drop a PHY's reference count; must be non-zero on entry */
void
pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
{
    ASSERT(phyp->ref_count != 0);
    atomic_dec_32(&phyp->ref_count);
}

/*
 * pmcs_reap_dead_phy
 *
 * This function is called from pmcs_new_tport when we have a PHY
 * without a target pointer. It's possible in that case that this PHY
 * may have a "brother" on the dead_phys list. That is, it may be the same as
 * this one but with a different root PHY number (e.g. pp05 vs. pp04). If
 * that's the case, update the dead PHY and this new PHY. If that's not the
 * case, we should get a tran_tgt_init on this after it's reported to SCSA.
 *
 * Called with PHY locked.
 */
static void
pmcs_reap_dead_phy(pmcs_phy_t *phyp)
{
    pmcs_hw_t *pwp = phyp->pwp;
    pmcs_phy_t *ctmp;
    pmcs_iport_t *iport_cmp;

    ASSERT(mutex_owned(&phyp->phy_lock));

    /*
     * Check the dead PHYs list
     */
    mutex_enter(&pwp->dead_phylist_lock);
    ctmp = pwp->dead_phys;
    while (ctmp) {
        /*
         * If the iport is NULL, compare against last_iport.
         */
        if (ctmp->iport) {
            iport_cmp = ctmp->iport;
        } else {
            iport_cmp = ctmp->last_iport;
        }

        if ((iport_cmp != phyp->iport) ||
            (memcmp((void *)&ctmp->sas_address[0],
            (void *)&phyp->sas_address[0], 8))) {
            ctmp = ctmp->dead_next;
            continue;
        }

        /*
         * Same SAS address on same iport. Now check to see if
         * the PHY path is the same with the possible exception
         * of the root PHY number.
         * The "5" is the string length of "pp00."
         */
        if ((strnlen(phyp->path, 5) >= 5) &&
            (strnlen(ctmp->path, 5) >= 5)) {
            if (memcmp((void *)&phyp->path[5],
                (void *)&ctmp->path[5],
                strnlen(phyp->path, 32) - 5) == 0) {
                break;
            }
        }

        ctmp = ctmp->dead_next;
    }
    mutex_exit(&pwp->dead_phylist_lock);

    /*
     * Found a match. Remove the target linkage and drop the
     * ref count on the old PHY. Then, increment the ref count
     * on the new PHY to compensate.
     */
    if (ctmp) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
            "%s: Found match in dead PHY list (0x%p) for new PHY %s",
            __func__, (void *)ctmp, phyp->path);
        /*
         * If there is a pointer to the target in the dead PHY, move
         * all reference counts to the new PHY.
7881 */ 7882 if (ctmp->target) { 7883 mutex_enter(&ctmp->target->statlock); 7884 phyp->target = ctmp->target; 7885 7886 while (ctmp->ref_count != 0) { 7887 pmcs_inc_phy_ref_count(phyp); 7888 pmcs_dec_phy_ref_count(ctmp); 7889 } 7890 /* 7891 * Update the target's linkage as well 7892 */ 7893 phyp->target->phy = phyp; 7894 phyp->target->dtype = phyp->dtype; 7895 ctmp->target = NULL; 7896 mutex_exit(&phyp->target->statlock); 7897 } 7898 } 7899 } 7900 7901 /* 7902 * Called with iport lock held 7903 */ 7904 void 7905 pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp) 7906 { 7907 ASSERT(mutex_owned(&iport->lock)); 7908 ASSERT(phyp); 7909 ASSERT(!list_link_active(&phyp->list_node)); 7910 7911 iport->nphy++; 7912 list_insert_tail(&iport->phys, phyp); 7913 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7914 &iport->nphy); 7915 mutex_enter(&phyp->phy_lock); 7916 pmcs_create_one_phy_stats(iport, phyp); 7917 mutex_exit(&phyp->phy_lock); 7918 pmcs_hold_iport(iport); 7919 } 7920 7921 /* 7922 * Called with the iport lock held 7923 */ 7924 void 7925 pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp) 7926 { 7927 pmcs_phy_t *pptr, *next_pptr; 7928 7929 ASSERT(mutex_owned(&iport->lock)); 7930 7931 /* 7932 * If phyp is NULL, remove all PHYs from the iport 7933 */ 7934 if (phyp == NULL) { 7935 for (pptr = list_head(&iport->phys); pptr != NULL; 7936 pptr = next_pptr) { 7937 next_pptr = list_next(&iport->phys, pptr); 7938 mutex_enter(&pptr->phy_lock); 7939 if (pptr->phy_stats != NULL) { 7940 kstat_delete(pptr->phy_stats); 7941 pptr->phy_stats = NULL; 7942 } 7943 pptr->iport = NULL; 7944 pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp, 7945 pptr->tgt_port_pm_tmp, B_FALSE); 7946 mutex_exit(&pptr->phy_lock); 7947 pmcs_rele_iport(iport); 7948 list_remove(&iport->phys, pptr); 7949 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, 7950 PMCS_NUM_PHYS, &iport->nphy); 7951 } 7952 iport->nphy = 0; 7953 return; 7954 } 7955 7956 ASSERT(phyp); 7957 
ASSERT(iport->nphy > 0); 7958 ASSERT(list_link_active(&phyp->list_node)); 7959 iport->nphy--; 7960 list_remove(&iport->phys, phyp); 7961 pmcs_update_phy_pm_props(phyp, phyp->att_port_pm_tmp, 7962 phyp->tgt_port_pm_tmp, B_FALSE); 7963 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7964 &iport->nphy); 7965 pmcs_rele_iport(iport); 7966 } 7967 7968 /* 7969 * This function checks to see if the target pointed to by phyp is still 7970 * correct. This is done by comparing the target's unit address with the 7971 * SAS address in phyp. 7972 * 7973 * Called with PHY locked and target statlock held 7974 */ 7975 static boolean_t 7976 pmcs_phy_target_match(pmcs_phy_t *phyp) 7977 { 7978 uint64_t wwn; 7979 char unit_address[PMCS_MAX_UA_SIZE]; 7980 boolean_t rval = B_FALSE; 7981 7982 ASSERT(phyp); 7983 ASSERT(phyp->target); 7984 ASSERT(mutex_owned(&phyp->phy_lock)); 7985 ASSERT(mutex_owned(&phyp->target->statlock)); 7986 7987 wwn = pmcs_barray2wwn(phyp->sas_address); 7988 (void) scsi_wwn_to_wwnstr(wwn, 1, unit_address); 7989 7990 if (memcmp((void *)unit_address, (void *)phyp->target->unit_address, 7991 strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) { 7992 rval = B_TRUE; 7993 } 7994 7995 return (rval); 7996 } 7997 /* 7998 * Commands used to serialize SMP requests. 7999 * 8000 * The SPC only allows 2 SMP commands per SMP target: 1 cmd pending and 1 cmd 8001 * queued for the same SMP target. If a third SMP cmd is sent to the SPC for an 8002 * SMP target that already has a SMP cmd pending and one queued, then the 8003 * SPC responds with the ERROR_INTERNAL_SMP_RESOURCE response. 8004 * 8005 * Additionally, the SPC has an 8 entry deep cmd queue and the number of SMP 8006 * cmds that can be queued is controlled by the PORT_CONTROL IOMB. The 8007 * SPC default is 1 SMP command/port (iport). These 2 queued SMP cmds would 8008 * have to be for different SMP targets. 
The INTERNAL_SMP_RESOURCE error will 8009 * also be returned if a 2nd SMP cmd is sent to the controller when there is 8010 * already 1 SMP cmd queued for that port or if a 3rd SMP cmd is sent to the 8011 * queue if there are already 2 queued SMP cmds. 8012 */ 8013 void 8014 pmcs_smp_acquire(pmcs_iport_t *iport) 8015 { 8016 if (iport == NULL) { 8017 return; 8018 } 8019 8020 mutex_enter(&iport->smp_lock); 8021 while (iport->smp_active) { 8022 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 8023 "%s: SMP is active on thread 0x%p, waiting", __func__, 8024 (void *)iport->smp_active_thread); 8025 cv_wait(&iport->smp_cv, &iport->smp_lock); 8026 } 8027 iport->smp_active = B_TRUE; 8028 iport->smp_active_thread = curthread; 8029 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL, 8030 "%s: SMP acquired by thread 0x%p", __func__, 8031 (void *)iport->smp_active_thread); 8032 mutex_exit(&iport->smp_lock); 8033 } 8034 8035 void 8036 pmcs_smp_release(pmcs_iport_t *iport) 8037 { 8038 if (iport == NULL) { 8039 return; 8040 } 8041 8042 mutex_enter(&iport->smp_lock); 8043 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL, 8044 "%s: SMP released by thread 0x%p", __func__, (void *)curthread); 8045 iport->smp_active = B_FALSE; 8046 iport->smp_active_thread = NULL; 8047 cv_signal(&iport->smp_cv); 8048 mutex_exit(&iport->smp_lock); 8049 } 8050 8051 /* 8052 * Update a PHY's attached-port-pm and target-port-pm properties 8053 * 8054 * phyp: PHY whose properties are to be updated 8055 * 8056 * att_bv: Bit value of the attached-port-pm property to be updated in the 8057 * 64-bit holding area for the PHY. 8058 * 8059 * tgt_bv: Bit value of the target-port-pm property to update in the 64-bit 8060 * holding area for the PHY. 8061 * 8062 * prop_add_val: If TRUE, we're adding bits into the property value. 8063 * Otherwise, we're taking them out. Either way, the properties for this 8064 * PHY will be updated. 
8065 */ 8066 void 8067 pmcs_update_phy_pm_props(pmcs_phy_t *phyp, uint64_t att_bv, uint64_t tgt_bv, 8068 boolean_t prop_add_val) 8069 { 8070 pmcs_xscsi_t *tgt; 8071 8072 if (prop_add_val) { 8073 /* 8074 * If the values are currently 0, then we're setting the 8075 * phymask for just this PHY as well. 8076 */ 8077 if (phyp->att_port_pm_tmp == 0) { 8078 phyp->att_port_pm = att_bv; 8079 phyp->tgt_port_pm = tgt_bv; 8080 } 8081 phyp->att_port_pm_tmp |= att_bv; 8082 phyp->tgt_port_pm_tmp |= tgt_bv; 8083 (void) snprintf(phyp->att_port_pm_str, PMCS_PM_MAX_NAMELEN, 8084 "%"PRIx64, phyp->att_port_pm_tmp); 8085 (void) snprintf(phyp->tgt_port_pm_str, PMCS_PM_MAX_NAMELEN, 8086 "%"PRIx64, phyp->tgt_port_pm_tmp); 8087 } else { 8088 phyp->att_port_pm_tmp &= ~att_bv; 8089 phyp->tgt_port_pm_tmp &= ~tgt_bv; 8090 if (phyp->att_port_pm_tmp) { 8091 (void) snprintf(phyp->att_port_pm_str, 8092 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8093 phyp->att_port_pm_tmp); 8094 } else { 8095 phyp->att_port_pm_str[0] = '\0'; 8096 phyp->att_port_pm = 0; 8097 } 8098 if (phyp->tgt_port_pm_tmp) { 8099 (void) snprintf(phyp->tgt_port_pm_str, 8100 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8101 phyp->tgt_port_pm_tmp); 8102 } else { 8103 phyp->tgt_port_pm_str[0] = '\0'; 8104 phyp->tgt_port_pm = 0; 8105 } 8106 } 8107 8108 if ((phyp->target_addr) && (*phyp->target_addr != NULL)) { 8109 tgt = *phyp->target_addr; 8110 } else if (phyp->target != NULL) { 8111 tgt = phyp->target; 8112 } else { 8113 return; 8114 } 8115 8116 mutex_enter(&tgt->statlock); 8117 if (!list_is_empty(&tgt->lun_list)) { 8118 pmcs_lun_t *lunp; 8119 8120 lunp = list_head(&tgt->lun_list); 8121 while (lunp) { 8122 (void) scsi_device_prop_update_string(lunp->sd, 8123 SCSI_DEVICE_PROP_PATH, 8124 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8125 phyp->att_port_pm_str); 8126 (void) scsi_device_prop_update_string(lunp->sd, 8127 SCSI_DEVICE_PROP_PATH, 8128 SCSI_ADDR_PROP_TARGET_PORT_PM, 8129 phyp->tgt_port_pm_str); 8130 lunp = list_next(&tgt->lun_list, lunp); 8131 } 8132 } else if 
(tgt->smpd) { 8133 (void) smp_device_prop_update_string(tgt->smpd, 8134 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8135 phyp->att_port_pm_str); 8136 (void) smp_device_prop_update_string(tgt->smpd, 8137 SCSI_ADDR_PROP_TARGET_PORT_PM, 8138 phyp->tgt_port_pm_str); 8139 } 8140 mutex_exit(&tgt->statlock); 8141 } 8142 8143 /* ARGSUSED */ 8144 void 8145 pmcs_deregister_device_work(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8146 { 8147 pmcs_phy_t *pptr; 8148 8149 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 8150 pmcs_lock_phy(pptr); 8151 if (pptr->deregister_wait) { 8152 pmcs_deregister_device(pwp, pptr); 8153 } 8154 pmcs_unlock_phy(pptr); 8155 } 8156 } 8157 8158 /* 8159 * pmcs_iport_active 8160 * 8161 * Mark this iport as active. Called with the iport lock held. 8162 */ 8163 static void 8164 pmcs_iport_active(pmcs_iport_t *iport) 8165 { 8166 ASSERT(mutex_owned(&iport->lock)); 8167 8168 iport->ua_state = UA_ACTIVE; 8169 iport->smp_active = B_FALSE; 8170 iport->smp_active_thread = NULL; 8171 } 8172 8173 /* ARGSUSED */ 8174 static void 8175 pmcs_tgtmap_activate_cb(void *tgtmap_priv, char *tgt_addr, 8176 scsi_tgtmap_tgt_type_t tgt_type, void **tgt_privp) 8177 { 8178 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8179 pmcs_hw_t *pwp = iport->pwp; 8180 pmcs_xscsi_t *target; 8181 8182 /* 8183 * Look up the target. If there is one, and it doesn't have a PHY 8184 * pointer, re-establish that linkage here. 8185 */ 8186 mutex_enter(&pwp->lock); 8187 target = pmcs_get_target(iport, tgt_addr, B_FALSE); 8188 mutex_exit(&pwp->lock); 8189 8190 /* 8191 * If we got a target, it will now have a PHY pointer and the PHY 8192 * will point to the target. The PHY will be locked, so we'll need 8193 * to unlock it. 8194 */ 8195 if (target != NULL) { 8196 pmcs_unlock_phy(target->phy); 8197 } 8198 8199 /* 8200 * Update config_restart_time so we don't try to restart discovery 8201 * while enumeration is still in progress. 
8202 */ 8203 mutex_enter(&pwp->config_lock); 8204 pwp->config_restart_time = ddi_get_lbolt() + 8205 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8206 mutex_exit(&pwp->config_lock); 8207 } 8208 8209 /* ARGSUSED */ 8210 static boolean_t 8211 pmcs_tgtmap_deactivate_cb(void *tgtmap_priv, char *tgt_addr, 8212 scsi_tgtmap_tgt_type_t tgt_type, void *tgt_priv, 8213 scsi_tgtmap_deact_rsn_t tgt_deact_rsn) 8214 { 8215 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8216 pmcs_phy_t *phyp; 8217 boolean_t rediscover = B_FALSE; 8218 8219 ASSERT(iport); 8220 8221 phyp = pmcs_find_phy_by_sas_address(iport->pwp, iport, NULL, tgt_addr); 8222 if (phyp == NULL) { 8223 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 8224 "%s: Couldn't find PHY at %s", __func__, tgt_addr); 8225 return (rediscover); 8226 } 8227 /* phyp is locked */ 8228 8229 if (!phyp->reenumerate && phyp->configured) { 8230 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, phyp->target, 8231 "%s: PHY @ %s is configured... re-enumerate", __func__, 8232 tgt_addr); 8233 phyp->reenumerate = 1; 8234 } 8235 8236 /* 8237 * Check to see if reenumerate is set, and if so, if we've reached our 8238 * maximum number of retries. 
8239 */ 8240 if (phyp->reenumerate) { 8241 if (phyp->enum_attempts == PMCS_MAX_REENUMERATE) { 8242 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8243 phyp->target, 8244 "%s: No more enumeration attempts for %s", __func__, 8245 tgt_addr); 8246 } else { 8247 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8248 phyp->target, "%s: Re-attempt enumeration for %s", 8249 __func__, tgt_addr); 8250 ++phyp->enum_attempts; 8251 rediscover = B_TRUE; 8252 } 8253 8254 phyp->reenumerate = 0; 8255 } 8256 8257 pmcs_unlock_phy(phyp); 8258 8259 mutex_enter(&iport->pwp->config_lock); 8260 iport->pwp->config_restart_time = ddi_get_lbolt() + 8261 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8262 if (rediscover) { 8263 iport->pwp->config_restart = B_TRUE; 8264 } else if (iport->pwp->config_restart == B_TRUE) { 8265 /* 8266 * If we aren't asking for rediscovery because of this PHY, 8267 * check to see if we're already asking for it on behalf of 8268 * some other PHY. If so, we'll want to return TRUE, so reset 8269 * "rediscover" here. 8270 */ 8271 rediscover = B_TRUE; 8272 } 8273 8274 mutex_exit(&iport->pwp->config_lock); 8275 8276 return (rediscover); 8277 } 8278 8279 void 8280 pmcs_status_disposition(pmcs_phy_t *phyp, uint32_t status) 8281 { 8282 ASSERT(phyp); 8283 ASSERT(!mutex_owned(&phyp->phy_lock)); 8284 8285 if (phyp == NULL) { 8286 return; 8287 } 8288 8289 pmcs_lock_phy(phyp); 8290 8291 /* 8292 * XXX: Do we need to call this function from an SSP_EVENT? 
8293 */ 8294 8295 switch (status) { 8296 case PMCOUT_STATUS_NO_DEVICE: 8297 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 8298 case PMCOUT_STATUS_XFER_ERR_BREAK: 8299 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 8300 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED: 8301 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION: 8302 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK: 8303 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION: 8304 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 8305 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 8306 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION: 8307 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR: 8308 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED: 8309 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME: 8310 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 8311 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 8312 case PMCOUT_STATUS_IO_PORT_IN_RESET: 8313 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 8314 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 8315 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 8316 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8317 "%s: status = 0x%x for " SAS_ADDR_FMT ", reenumerate", 8318 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8319 phyp->reenumerate = 1; 8320 break; 8321 8322 default: 8323 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8324 "%s: status = 0x%x for " SAS_ADDR_FMT ", no reenumeration", 8325 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8326 break; 8327 } 8328 8329 pmcs_unlock_phy(phyp); 8330 } 8331 8332 /* 8333 * Add the list of PHYs pointed to by phyp to the dead_phys_list 8334 * 8335 * Called with all PHYs in the list locked 8336 */ 8337 static void 8338 pmcs_add_dead_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8339 { 8340 mutex_enter(&pwp->dead_phylist_lock); 8341 while (phyp) { 8342 pmcs_phy_t *nxt = phyp->sibling; 8343 ASSERT(phyp->dead); 8344 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 8345 "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__, 8346 
(void *)phyp, phyp->path, phyp->ref_count); 8347 /* 8348 * Put this PHY on the dead PHY list for the watchdog to 8349 * clean up after any outstanding work has completed. 8350 */ 8351 phyp->dead_next = pwp->dead_phys; 8352 pwp->dead_phys = phyp; 8353 pmcs_unlock_phy(phyp); 8354 phyp = nxt; 8355 } 8356 mutex_exit(&pwp->dead_phylist_lock); 8357 } 8358 8359 static void 8360 pmcs_get_fw_version(pmcs_hw_t *pwp) 8361 { 8362 uint32_t ila_len, ver_hi, ver_lo; 8363 uint8_t ila_ver_string[9], img_flag; 8364 char uc, *ucp = &uc; 8365 unsigned long ila_ver; 8366 uint64_t ver_hilo; 8367 8368 /* Firmware version is easy. */ 8369 pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW); 8370 8371 /* 8372 * Get the image size (2nd to last dword) 8373 * NOTE: The GSM registers are mapped little-endian, but the data 8374 * on the flash is actually big-endian, so we need to swap these values 8375 * regardless of which platform we're on. 8376 */ 8377 ila_len = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8378 GSM_FLASH_BASE + GSM_SM_BLKSZ - (2 << 2))); 8379 if (ila_len > 65535) { 8380 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 8381 "%s: Invalid ILA image size (0x%x)?", __func__, ila_len); 8382 return; 8383 } 8384 8385 /* 8386 * The numeric version is at ila_len - PMCS_ILA_VER_OFFSET 8387 */ 8388 ver_hi = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8389 GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET)); 8390 ver_lo = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8391 GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET + 4)); 8392 ver_hilo = BE_64(((uint64_t)ver_hi << 32) | ver_lo); 8393 bcopy((const void *)&ver_hilo, &ila_ver_string[0], 8); 8394 ila_ver_string[8] = '\0'; 8395 8396 (void) ddi_strtoul((const char *)ila_ver_string, &ucp, 16, &ila_ver); 8397 pwp->ila_ver = (int)(ila_ver & 0xffffffff); 8398 8399 img_flag = (BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8400 GSM_FLASH_IMG_FLAGS)) & 0xff000000) >> 24; 8401 if (img_flag & PMCS_IMG_FLAG_A) { 8402 pwp->fw_active_img = 1; 
8403 } else { 8404 pwp->fw_active_img = 0; 8405 } 8406 } 8407