1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * This file contains various support routines. 
27 */ 28 29 #include <sys/scsi/adapters/pmcs/pmcs.h> 30 31 /* 32 * Local static data 33 */ 34 static int tgtmap_stable_usec = MICROSEC; /* 1 second */ 35 static int tgtmap_csync_usec = 10 * MICROSEC; /* 10 seconds */ 36 37 /* 38 * SAS Topology Configuration 39 */ 40 static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *); 41 static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *); 42 43 static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *); 44 static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *); 45 static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int); 46 47 static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *); 48 static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *, 49 pmcs_phy_t *); 50 51 static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *); 52 static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t); 53 static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *); 54 static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *); 55 static void pmcs_begin_observations(pmcs_hw_t *); 56 static void pmcs_flush_observations(pmcs_hw_t *); 57 static boolean_t pmcs_report_observations(pmcs_hw_t *); 58 static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *, 59 pmcs_phy_t *); 60 static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *); 61 static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *); 62 static void pmcs_lock_phy_impl(pmcs_phy_t *, int); 63 static void pmcs_unlock_phy_impl(pmcs_phy_t *, int); 64 static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *); 65 static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *); 66 static void pmcs_reap_dead_phy(pmcs_phy_t *); 67 static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *); 68 static boolean_t pmcs_phy_target_match(pmcs_phy_t *); 69 static void pmcs_iport_active(pmcs_iport_t *); 70 static void pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t, 71 void **); 72 
static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *, 73 scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t); 74 static void pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *); 75 static void pmcs_get_fw_version(pmcs_hw_t *); 76 static int pmcs_get_time_stamp(pmcs_hw_t *, uint64_t *, hrtime_t *); 77 78 /* 79 * Often used strings 80 */ 81 const char pmcs_nowrk[] = "%s: unable to get work structure"; 82 const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry"; 83 const char pmcs_timeo[] = "%s: command timed out"; 84 85 extern const ddi_dma_attr_t pmcs_dattr; 86 extern kmutex_t pmcs_trace_lock; 87 88 /* 89 * Some Initial setup steps. 90 */ 91 92 int 93 pmcs_setup(pmcs_hw_t *pwp) 94 { 95 uint32_t barval = pwp->mpibar; 96 uint32_t i, scratch, regbar, regoff, barbar, baroff; 97 uint32_t new_ioq_depth, ferr = 0; 98 99 /* 100 * Check current state. If we're not at READY state, 101 * we can't go further. 102 */ 103 scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 104 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) { 105 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 106 "%s: AAP Error State (0x%x)", 107 __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 108 PMCS_MSGU_AAP_ERROR_MASK); 109 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 110 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 111 return (-1); 112 } 113 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 114 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 115 "%s: AAP unit not ready (state 0x%x)", 116 __func__, scratch & PMCS_MSGU_AAP_STATE_MASK); 117 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE); 118 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 119 return (-1); 120 } 121 122 /* 123 * Read the offset from the Message Unit scratchpad 0 register. 124 * This allows us to read the MPI Configuration table. 125 * 126 * Check its signature for validity. 
127 */ 128 baroff = barval; 129 barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT; 130 baroff &= PMCS_MSGU_MPI_OFFSET_MASK; 131 132 regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0); 133 regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT; 134 regoff &= PMCS_MSGU_MPI_OFFSET_MASK; 135 136 if (regoff > baroff) { 137 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 138 "%s: bad MPI Table Length (register offset=0x%08x, " 139 "passed offset=0x%08x)", __func__, regoff, baroff); 140 return (-1); 141 } 142 if (regbar != barbar) { 143 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 144 "%s: bad MPI BAR (register BAROFF=0x%08x, " 145 "passed BAROFF=0x%08x)", __func__, regbar, barbar); 146 return (-1); 147 } 148 pwp->mpi_offset = regoff; 149 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) { 150 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 151 "%s: Bad MPI Configuration Table Signature 0x%x", __func__, 152 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS)); 153 return (-1); 154 } 155 156 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) { 157 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 158 "%s: Bad MPI Configuration Revision 0x%x", __func__, 159 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR)); 160 return (-1); 161 } 162 163 /* 164 * Generate offsets for the General System, Inbound Queue Configuration 165 * and Outbound Queue configuration tables. This way the macros to 166 * access those tables will work correctly. 
167 */ 168 pwp->mpi_gst_offset = 169 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO); 170 pwp->mpi_iqc_offset = 171 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO); 172 pwp->mpi_oqc_offset = 173 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO); 174 175 pmcs_get_fw_version(pwp); 176 177 pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO); 178 pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16; 179 180 pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 181 pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 182 pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)); 183 if (pwp->max_iq <= PMCS_NIQ) { 184 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 185 "%s: not enough Inbound Queues supported " 186 "(need %d, max_oq=%d)", __func__, pwp->max_iq, PMCS_NIQ); 187 return (-1); 188 } 189 if (pwp->max_oq <= PMCS_NOQ) { 190 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 191 "%s: not enough Outbound Queues supported " 192 "(need %d, max_oq=%d)", __func__, pwp->max_oq, PMCS_NOQ); 193 return (-1); 194 } 195 if (pwp->nphy == 0) { 196 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 197 "%s: zero phys reported", __func__); 198 return (-1); 199 } 200 if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) { 201 pwp->hipri_queue = (1 << PMCS_IQ_OTHER); 202 } 203 204 205 for (i = 0; i < pwp->nphy; i++) { 206 PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i); 207 PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i); 208 } 209 210 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2, 211 (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) | 212 (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT)); 213 214 /* 215 * Verify that ioq_depth is valid (> 0 and not so high that it 216 * would cause us to overrun the chip with commands). 217 */ 218 if (pwp->ioq_depth == 0) { 219 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 220 "%s: I/O queue depth set to 0. 
Setting to %d", 221 __func__, PMCS_NQENTRY); 222 pwp->ioq_depth = PMCS_NQENTRY; 223 } 224 225 if (pwp->ioq_depth < PMCS_MIN_NQENTRY) { 226 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 227 "%s: I/O queue depth set too low (%d). Setting to %d", 228 __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY); 229 pwp->ioq_depth = PMCS_MIN_NQENTRY; 230 } 231 232 if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) { 233 new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1); 234 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 235 "%s: I/O queue depth set too high (%d). Setting to %d", 236 __func__, pwp->ioq_depth, new_ioq_depth); 237 pwp->ioq_depth = new_ioq_depth; 238 } 239 240 /* 241 * Allocate consistent memory for OQs and IQs. 242 */ 243 pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr; 244 pwp->iqp_dma_attr.dma_attr_align = 245 pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE; 246 247 /* 248 * The Rev C chip has the ability to do PIO to or from consistent 249 * memory anywhere in a 64 bit address space, but the firmware is 250 * not presently set up to do so. 
251 */ 252 pwp->iqp_dma_attr.dma_attr_addr_hi = 253 pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull; 254 255 for (i = 0; i < PMCS_NIQ; i++) { 256 if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr, 257 &pwp->iqp_acchdls[i], 258 &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 259 (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) { 260 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 261 "Failed to setup DMA for iqp[%d]", i); 262 return (-1); 263 } 264 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 265 } 266 267 for (i = 0; i < PMCS_NOQ; i++) { 268 if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr, 269 &pwp->oqp_acchdls[i], 270 &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth, 271 (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) { 272 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 273 "Failed to setup DMA for oqp[%d]", i); 274 return (-1); 275 } 276 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 277 } 278 279 /* 280 * Install the IQ and OQ addresses (and null out the rest). 
281 */ 282 for (i = 0; i < pwp->max_iq; i++) { 283 pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i)); 284 if (i < PMCS_NIQ) { 285 if (i != PMCS_IQ_OTHER) { 286 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 287 pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16)); 288 } else { 289 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 290 (1 << 30) | pwp->ioq_depth | 291 (PMCS_QENTRY_SIZE << 16)); 292 } 293 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 294 DWORD1(pwp->iqaddr[i])); 295 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 296 DWORD0(pwp->iqaddr[i])); 297 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 298 DWORD1(pwp->ciaddr+IQ_OFFSET(i))); 299 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 300 DWORD0(pwp->ciaddr+IQ_OFFSET(i))); 301 } else { 302 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0); 303 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0); 304 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0); 305 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0); 306 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0); 307 } 308 } 309 310 for (i = 0; i < pwp->max_oq; i++) { 311 pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i)); 312 if (i < PMCS_NOQ) { 313 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth | 314 (PMCS_QENTRY_SIZE << 16) | OQIEX); 315 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 316 DWORD1(pwp->oqaddr[i])); 317 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 318 DWORD0(pwp->oqaddr[i])); 319 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 320 DWORD1(pwp->ciaddr+OQ_OFFSET(i))); 321 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 322 DWORD0(pwp->ciaddr+OQ_OFFSET(i))); 323 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 324 pwp->oqvec[i] << 24); 325 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 326 } else { 327 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0); 328 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0); 329 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0); 330 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0); 331 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0); 332 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0); 333 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 334 } 335 } 336 337 /* 338 * Set up logging, if defined. 
339 */ 340 if (pwp->fwlog) { 341 uint64_t logdma = pwp->fwaddr; 342 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma)); 343 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma)); 344 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1); 345 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog); 346 logdma += (PMCS_FWLOG_SIZE >> 1); 347 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma)); 348 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma)); 349 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1); 350 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog); 351 } 352 353 /* 354 * Interrupt vectors, outbound queues, and odb_auto_clear 355 * 356 * MSI/MSI-X: 357 * If we got 4 interrupt vectors, we'll assign one to each outbound 358 * queue as well as the fatal interrupt, and auto clear can be set 359 * for each. 360 * 361 * If we only got 2 vectors, one will be used for I/O completions 362 * and the other for the other two vectors. In this case, auto_ 363 * clear can only be set for I/Os, which is fine. The fatal 364 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which 365 * is not an interrupt vector. 366 * 367 * MSI/MSI-X/INT-X: 368 * If we only got 1 interrupt vector, auto_clear must be set to 0, 369 * and again the fatal interrupt will be mapped to the 370 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector). 
371 */ 372 373 switch (pwp->int_type) { 374 case PMCS_INT_MSIX: 375 case PMCS_INT_MSI: 376 switch (pwp->intr_cnt) { 377 case 1: 378 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 379 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 380 pwp->odb_auto_clear = 0; 381 break; 382 case 2: 383 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 384 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 385 pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) | 386 (1 << PMCS_MSIX_IODONE); 387 break; 388 case 4: 389 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE | 390 (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT)); 391 pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) | 392 (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) | 393 (1 << PMCS_MSIX_EVENTS); 394 break; 395 } 396 break; 397 398 case PMCS_INT_FIXED: 399 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 400 PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT)); 401 pwp->odb_auto_clear = 0; 402 break; 403 } 404 405 /* 406 * If the open retry interval is non-zero, set it. 407 */ 408 if (pwp->open_retry_interval != 0) { 409 int phynum; 410 411 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 412 "%s: Setting open retry interval to %d usecs", __func__, 413 pwp->open_retry_interval); 414 for (phynum = 0; phynum < pwp->nphy; phynum ++) { 415 pmcs_wr_gsm_reg(pwp, OPEN_RETRY_INTERVAL(phynum), 416 pwp->open_retry_interval); 417 } 418 } 419 420 /* 421 * Enable Interrupt Reassertion 422 * Default Delay 1000us 423 */ 424 ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR); 425 if ((ferr & PMCS_MPI_IRAE) == 0) { 426 ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK); 427 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE); 428 } 429 430 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear); 431 pwp->mpi_table_setup = 1; 432 return (0); 433 } 434 435 /* 436 * Start the Message Passing protocol with the PMC chip. 
437 */ 438 int 439 pmcs_start_mpi(pmcs_hw_t *pwp) 440 { 441 int i; 442 443 pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI); 444 for (i = 0; i < 1000; i++) { 445 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & 446 PMCS_MSGU_IBDB_MPIINI) == 0) { 447 break; 448 } 449 drv_usecwait(1000); 450 } 451 if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) { 452 return (-1); 453 } 454 drv_usecwait(500000); 455 456 /* 457 * Check to make sure we got to INIT state. 458 */ 459 if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) != 460 PMCS_MPI_STATE_INIT) { 461 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 462 "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__, 463 pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE), 464 pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR)); 465 return (-1); 466 } 467 return (0); 468 } 469 470 /* 471 * Stop the Message Passing protocol with the PMC chip. 472 */ 473 int 474 pmcs_stop_mpi(pmcs_hw_t *pwp) 475 { 476 int i; 477 478 for (i = 0; i < pwp->max_iq; i++) { 479 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0); 480 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0); 481 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0); 482 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0); 483 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0); 484 } 485 for (i = 0; i < pwp->max_oq; i++) { 486 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0); 487 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0); 488 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0); 489 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0); 490 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0); 491 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0); 492 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0); 493 } 494 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0); 495 pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU); 496 for (i = 0; i < 2000; i++) { 497 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & 498 PMCS_MSGU_IBDB_MPICTU) == 0) { 499 break; 500 } 501 drv_usecwait(1000); 502 } 503 if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) { 504 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 505 "%s: 
MPI stop failed", __func__); 506 return (-1); 507 } 508 return (0); 509 } 510 511 /* 512 * Do a sequence of ECHO messages to test for MPI functionality, 513 * all inbound and outbound queue functionality and interrupts. 514 */ 515 int 516 pmcs_echo_test(pmcs_hw_t *pwp) 517 { 518 echo_test_t fred; 519 struct pmcwork *pwrk; 520 uint32_t *msg, count; 521 int iqe = 0, iqo = 0, result, rval = 0; 522 int iterations; 523 hrtime_t echo_start, echo_end, echo_total; 524 525 ASSERT(pwp->max_cmd > 0); 526 527 /* 528 * We want iterations to be max_cmd * 3 to ensure that we run the 529 * echo test enough times to iterate through every inbound queue 530 * at least twice. 531 */ 532 iterations = pwp->max_cmd * 3; 533 534 echo_total = 0; 535 count = 0; 536 537 while (count < iterations) { 538 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL); 539 if (pwrk == NULL) { 540 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 541 pmcs_nowrk, __func__); 542 rval = -1; 543 break; 544 } 545 546 mutex_enter(&pwp->iqp_lock[iqe]); 547 msg = GET_IQ_ENTRY(pwp, iqe); 548 if (msg == NULL) { 549 mutex_exit(&pwp->iqp_lock[iqe]); 550 pmcs_pwork(pwp, pwrk); 551 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 552 pmcs_nomsg, __func__); 553 rval = -1; 554 break; 555 } 556 557 bzero(msg, PMCS_QENTRY_SIZE); 558 559 if (iqe == PMCS_IQ_OTHER) { 560 /* This is on the high priority queue */ 561 msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO)); 562 } else { 563 msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO)); 564 } 565 msg[1] = LE_32(pwrk->htag); 566 fred.signature = 0xdeadbeef; 567 fred.count = count; 568 fred.ptr = &count; 569 (void) memcpy(&msg[2], &fred, sizeof (fred)); 570 pwrk->state = PMCS_WORK_STATE_ONCHIP; 571 572 INC_IQ_ENTRY(pwp, iqe); 573 574 echo_start = gethrtime(); 575 DTRACE_PROBE2(pmcs__echo__test__wait__start, 576 hrtime_t, echo_start, uint32_t, pwrk->htag); 577 578 if (++iqe == PMCS_NIQ) { 579 iqe = 0; 580 } 581 if (++iqo == PMCS_NOQ) { 582 iqo = 0; 583 } 584 585 WAIT_FOR(pwrk, 250, result); 586 587 echo_end 
= gethrtime(); 588 DTRACE_PROBE2(pmcs__echo__test__wait__end, 589 hrtime_t, echo_end, int, result); 590 591 echo_total += (echo_end - echo_start); 592 593 pmcs_pwork(pwp, pwrk); 594 if (result) { 595 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 596 "%s: command timed out on echo test #%d", 597 __func__, count); 598 rval = -1; 599 break; 600 } 601 } 602 603 /* 604 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to 605 * remove the overhead of things like the delay in getting signaled 606 * for completion. 607 */ 608 if (echo_total != 0) { 609 pwp->io_intr_coal.intr_latency = 610 (echo_total / iterations) / 2; 611 pwp->io_intr_coal.intr_threshold = 612 PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 / 613 pwp->io_intr_coal.intr_latency); 614 } 615 616 return (rval); 617 } 618 619 /* 620 * Start the (real) phys 621 */ 622 int 623 pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed) 624 { 625 int result; 626 uint32_t *msg; 627 struct pmcwork *pwrk; 628 pmcs_phy_t *pptr; 629 sas_identify_af_t sap; 630 631 mutex_enter(&pwp->lock); 632 pptr = pwp->root_phys + phynum; 633 if (pptr == NULL) { 634 mutex_exit(&pwp->lock); 635 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 636 "%s: cannot find port %d", __func__, phynum); 637 return (0); 638 } 639 640 pmcs_lock_phy(pptr); 641 mutex_exit(&pwp->lock); 642 643 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 644 if (pwrk == NULL) { 645 pmcs_unlock_phy(pptr); 646 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__); 647 return (-1); 648 } 649 650 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 651 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 652 653 if (msg == NULL) { 654 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 655 pmcs_unlock_phy(pptr); 656 pmcs_pwork(pwp, pwrk); 657 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__); 658 return (-1); 659 } 660 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START)); 661 msg[1] = LE_32(pwrk->htag); 662 msg[2] = LE_32(linkmode | speed | phynum); 
663 bzero(&sap, sizeof (sap)); 664 sap.device_type = SAS_IF_DTYPE_ENDPOINT; 665 sap.ssp_ini_port = 1; 666 667 if (pwp->separate_ports) { 668 pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address); 669 } else { 670 pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address); 671 } 672 673 ASSERT(phynum < SAS2_PHYNUM_MAX); 674 sap.phy_identifier = phynum & SAS2_PHYNUM_MASK; 675 (void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t)); 676 pwrk->state = PMCS_WORK_STATE_ONCHIP; 677 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 678 679 pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1); 680 pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1); 681 pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE; 682 pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE; 683 684 pmcs_unlock_phy(pptr); 685 WAIT_FOR(pwrk, 1000, result); 686 pmcs_pwork(pwp, pwrk); 687 688 if (result) { 689 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 690 } else { 691 mutex_enter(&pwp->lock); 692 pwp->phys_started |= (1 << phynum); 693 mutex_exit(&pwp->lock); 694 } 695 696 return (0); 697 } 698 699 int 700 pmcs_start_phys(pmcs_hw_t *pwp) 701 { 702 int i, rval; 703 704 for (i = 0; i < pwp->nphy; i++) { 705 if ((pwp->phyid_block_mask & (1 << i)) == 0) { 706 if (pmcs_start_phy(pwp, i, 707 (pwp->phymode << PHY_MODE_SHIFT), 708 pwp->physpeed << PHY_LINK_SHIFT)) { 709 return (-1); 710 } 711 if (pmcs_clear_diag_counters(pwp, i)) { 712 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 713 "%s: failed to reset counters on PHY (%d)", 714 __func__, i); 715 } 716 } 717 } 718 719 rval = pmcs_get_time_stamp(pwp, &pwp->fw_timestamp, &pwp->hrtimestamp); 720 if (rval) { 721 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 722 "%s: Failed to obtain firmware timestamp", __func__); 723 } else { 724 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 725 "Firmware timestamp: 0x%" PRIx64, pwp->fw_timestamp); 726 } 727 728 return (0); 729 } 730 731 /* 732 * Called with PHY locked 733 */ 734 int 735 pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, 
uint8_t type)
{
	uint32_t *msg;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	const char *mbar;
	uint32_t amt;
	uint32_t pdevid;
	uint32_t stsoff;
	uint32_t status;
	int result, level, phynum;
	struct pmcwork *pwrk;
	uint32_t htag;

	ASSERT(mutex_owned(&pptr->phy_lock));

	bzero(iomb, PMCS_QENTRY_SIZE);
	phynum = pptr->phynum;
	level = pptr->level;
	if (level > 0) {
		/* expander-attached: address the request to the parent */
		pdevid = pptr->parent->device_id;
	} else if ((level == 0) && (pptr->dtype == EXPANDER)) {
		/* never reset an HBA-level phy that fronts an expander */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Not resetting HBA PHY @ %s", __func__, pptr->path);
		return (0);
	}

	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		return (0);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	/* completion copies the outbound IOMB into this local buffer */
	pwrk->arg = iomb;

	/*
	 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
	 * function to do either a link reset or hard reset.  If level == 0,
	 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
	 * root (local) PHY
	 */
	if (level) {
		stsoff = 2;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SMP_REQUEST));
		iomb[1] = LE_32(pwrk->htag);
		iomb[2] = LE_32(pdevid);
		iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
		/*
		 * Send SMP PHY CONTROL/HARD or LINK RESET
		 */
		iomb[4] = BE_32(0x40910000);
		iomb[5] = 0;

		if (type == PMCS_PHYOP_HARD_RESET) {
			mbar = "SMP PHY CONTROL/HARD RESET";
			iomb[6] = BE_32((phynum << 24) |
			    (PMCS_PHYOP_HARD_RESET << 16));
		} else {
			mbar = "SMP PHY CONTROL/LINK RESET";
			iomb[6] = BE_32((phynum << 24) |
			    (PMCS_PHYOP_LINK_RESET << 16));
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s for phy 0x%x",
		    __func__, mbar, pptr->parent->path, pptr->phynum);
		amt = 7;
	} else {
		/*
		 * Unlike most other Outbound messages, status for
		 * a local phy operation is in DWORD 3.
		 */
		stsoff = 3;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_LOCAL_PHY_CONTROL));
		iomb[1] = LE_32(pwrk->htag);
		if (type == PMCS_PHYOP_LINK_RESET) {
			mbar = "LOCAL PHY LINK RESET";
			iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
		} else {
			mbar = "LOCAL PHY HARD RESET";
			iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s", __func__, mbar, pptr->path);
		amt = 3;
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	COPY_MESSAGE(msg, iomb, amt);
	/* save the tag: pwrk is freed before we may need it for an abort */
	htag = pwrk->htag;

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* drop the phy lock while waiting; caller expects it held on return */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);

		/* timed out: try to abort the outstanding request by tag */
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP abort for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		return (EIO);
	}
	/* status DWORD index differs between the two request flavors */
	status = LE_32(iomb[stsoff]);

	if (status != PMCOUT_STATUS_OK) {
		char buf[32];
		const char *es = pmcs_status_str(status);
		if (es == NULL) {
			(void) snprintf(buf, sizeof (buf), "Status 0x%x",
			    status);
			es = buf;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: %s action returned %s for %s", __func__, mbar, es,
		    pptr->path);
		return (status);
	}

	return (0);
}

/*
 * Stop the (real) phys. No PHY or softstate locks are required as this only
 * happens during detach.
 *
 * Sends PHY_STOP for 'phynum' if it was started, clears its bit in
 * pwp->phys_started, and marks the phy unconfigured.  Failures are only
 * logged since detach must proceed regardless.
 */
void
pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
{
	int result;
	pmcs_phy_t *pptr;
	uint32_t *msg;
	struct pmcwork *pwrk;

	pptr = pwp->root_phys + phynum;
	if (pptr == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: unable to find port %d", __func__, phynum);
		return;
	}

	if (pwp->phys_started & (1 << phynum)) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nowrk, __func__);
			return;
		}

		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nomsg, __func__);
			return;
		}

		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
		msg[1] = LE_32(pwrk->htag);
		msg[2] = LE_32(phynum);
		pwrk->state = PMCS_WORK_STATE_ONCHIP;
		/*
		 * Make this unconfigured now.
		 */
		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
		WAIT_FOR(pwrk, 1000, result);

		pmcs_pwork(pwp, pwrk);
		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    pptr, NULL, pmcs_timeo, __func__);
		}

		pwp->phys_started &= ~(1 << phynum);
	}

	pptr->configured = 0;
}

/*
 * No locks should be required as this is only called during detach.
 * Stops every root phy that is not administratively blocked.
 */
void
pmcs_stop_phys(pmcs_hw_t *pwp)
{
	int i;
	for (i = 0; i < pwp->nphy; i++) {
		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
			pmcs_stop_phy(pwp, i);
		}
	}
}

/*
 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
963 * ERR_CNT_RESET: return status of cmd 964 * DIAG_REPORT_GET: return value of the counter 965 */ 966 int 967 pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc, 968 uint8_t phynum) 969 { 970 uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1]; 971 int result; 972 struct pmcwork *pwrk; 973 974 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL); 975 if (pwrk == NULL) { 976 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__); 977 return (DDI_FAILURE); 978 } 979 pwrk->arg = msg; 980 htag = pwrk->htag; 981 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE)); 982 msg[1] = LE_32(htag); 983 msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) | 984 (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum); 985 986 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 987 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 988 if (ptr == NULL) { 989 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 990 pmcs_pwork(pwp, pwrk); 991 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__); 992 return (DDI_FAILURE); 993 } 994 COPY_MESSAGE(ptr, msg, 3); 995 pwrk->state = PMCS_WORK_STATE_ONCHIP; 996 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 997 998 WAIT_FOR(pwrk, 1000, result); 999 1000 pmcs_pwork(pwp, pwrk); 1001 1002 if (result) { 1003 pmcs_timed_out(pwp, htag, __func__); 1004 return (DDI_FAILURE); 1005 } 1006 1007 status = LE_32(msg[3]); 1008 1009 /* Return for counter reset */ 1010 if (cmd == PMCS_ERR_CNT_RESET) 1011 return (status); 1012 1013 /* Return for counter value */ 1014 if (status) { 1015 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1016 "%s: failed, status (0x%x)", __func__, status); 1017 return (DDI_FAILURE); 1018 } 1019 return (LE_32(msg[4])); 1020 } 1021 1022 /* Get the current value of the counter for desc on phynum and return it. */ 1023 int 1024 pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum) 1025 { 1026 return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum)); 1027 } 1028 1029 /* Clear all of the counters for phynum. 
   Returns the status of the command. */
int
pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
{
	uint32_t cmd = PMCS_ERR_CNT_RESET;
	uint32_t cmd_desc;

	/* Reset each PHY error counter in turn; fail fast on first error */
	cmd_desc = PMCS_INVALID_DWORD_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	cmd_desc = PMCS_DISPARITY_ERR_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	cmd_desc = PMCS_LOST_DWORD_SYNC_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	cmd_desc = PMCS_RESET_FAILED_CNT;
	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * Get firmware timestamp, along with a matching system hrtime.
 * Returns 0 on success, -1 on failure.
 */
static int
pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *fw_ts, hrtime_t *sys_hr_ts)
{
	uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (-1);
	}
	pwrk->arg = msg;
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
	msg[1] = LE_32(pwrk->htag);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	COPY_MESSAGE(ptr, msg, 2);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	WAIT_FOR(pwrk, 1000, result);

	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (-1);
	}

	/*
	 * Capture the system and firmware timestamps together under
	 * pmcs_trace_lock so that the pair stays consistent.
	 */
	mutex_enter(&pmcs_trace_lock);
	*sys_hr_ts = gethrtime();
	gethrestime(&pwp->sys_timestamp);
	*fw_ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32);
	mutex_exit(&pmcs_trace_lock);
	return (0);
}

/*
 * Dump all pertinent registers
 */

void
pmcs_register_dump(pmcs_hw_t *pwp)
{
	int i;
	uint32_t val;

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start",
	    ddi_get_instance(pwp->dip));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
	/* Inbound/outbound queue consumer and producer indices */
	for (i = 0; i < PMCS_NIQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u",
		    i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
	}
	for (i = 0; i < PMCS_NOQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u",
		    i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
	}
	val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
	    val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
	    PMCS_HMI_ERR(val));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE MSGU TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
	for (i = 0; i < pwp->nphy; i++) {
		uint32_t rerrf, pinfo, started = 0, link = 0;
		pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
		/* Bit 0: phy started; bit 1: link up */
		if (pinfo & 1) {
			started = 1;
			link = pinfo & 2;
		}
		rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
		    i, started, link, rerrf);
	}
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end",
	    ddi_get_instance(pwp->dip));
}

/*
 * Handle SATA Abort and other error processing
 */
int
pmcs_abort_handler(pmcs_hw_t *pwp)
{
	pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
	pmcs_xscsi_t *tgt;
	int r, level = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__);

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys;
	mutex_exit(&pwp->lock);

	/*
	 * Walk the PHY tree iteratively; pnext_uplevel acts as an explicit
	 * stack of pending sibling pointers for each expander level.
	 */
	while (pptr) {
		/*
		 * XXX: Need to make sure this doesn't happen
		 * XXX: when non-NCQ commands are running.
		 */
		pmcs_lock_phy(pptr);
		if (pptr->need_rl_ext) {
			ASSERT(pptr->dtype == SATA);
			if (pmcs_acquire_scratch(pwp, B_FALSE)) {
				goto next_phy;
			}
			r = pmcs_sata_abort_ncq(pwp, pptr);
			pmcs_release_scratch(pwp);
			if (r == ENOMEM) {
				goto next_phy;
			}
			if (r) {
				r = pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET);
				if (r == ENOMEM) {
					goto next_phy;
				}
				/* what if other failures happened? */
				pptr->abort_pending = 1;
				pptr->abort_sent = 0;
			}
		}
		if (pptr->abort_pending == 0 || pptr->abort_sent) {
			goto next_phy;
		}
		pptr->abort_pending = 0;
		if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
			/* Couldn't issue the abort now; leave it pending */
			pptr->abort_pending = 1;
			goto next_phy;
		}
		pptr->abort_sent = 1;

		/*
		 * If the iport is no longer active, flush the queues
		 */
		if ((pptr->iport == NULL) ||
		    (pptr->iport->ua_state != UA_ACTIVE)) {
			tgt = pptr->target;
			if (tgt) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
				    "%s: Clearing target 0x%p, inactive iport",
				    __func__, (void *) tgt);
				mutex_enter(&tgt->statlock);
				pmcs_clear_xp(pwp, tgt);
				mutex_exit(&tgt->statlock);
			}
		}

next_phy:
		if (pptr->children) {
			/* Descend; remember where to resume at this level */
			pnext = pptr->children;
			pnext_uplevel[level++] = pptr->sibling;
		} else {
			pnext = pptr->sibling;
			while ((pnext == NULL) && (level > 0)) {
				pnext = pnext_uplevel[--level];
			}
		}

		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (0);
}

/*
 * Register a device (get a device handle for it).
 * Called with PHY lock held.
 */
int
pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	int result = 0;
	uint32_t *msg;
	uint32_t tmp, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	if (msg == NULL ||
	    (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		result = ENOMEM;
		goto out;
	}

	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;

	msg[1] = LE_32(pwrk->htag);
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
	tmp = PMCS_DEVREG_TLR |
	    (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
	if (IS_ROOT_PHY(pptr)) {
		/* Direct-attached: include the phy number in the port field */
		msg[2] = LE_32(pptr->portid |
		    (pptr->phynum << PMCS_PHYID_SHIFT));
	} else {
		msg[2] = LE_32(pptr->portid);
	}
	if (pptr->dtype == SATA) {
		if (IS_ROOT_PHY(pptr)) {
			tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
		} else {
			tmp |= PMCS_DEVREG_TYPE_SATA;
		}
	} else {
		tmp |= PMCS_DEVREG_TYPE_SAS;
	}
	msg[3] = LE_32(tmp);
	msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
	(void) memcpy(&msg[5], pptr->sas_address, 8);

	CLEAN_MESSAGE(msg, 7);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock across the wait so completion can proceed */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		result = ETIMEDOUT;
		goto out;
	}
	status = LE_32(iomb[2]);
	tmp = LE_32(iomb[3]);	/* device id assigned by the firmware */
	switch (status) {
	case PMCS_DEVREG_OK:
	case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
	case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
		if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
			result = EEXIST;
			goto out;
		} else if (status != PMCS_DEVREG_OK) {
			if (tmp == 0xffffffff) {	/* F/W bug */
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has bogus devid 0x%x",
				    __func__, pptr->path, tmp);
				result = EIO;
				goto out;
			} else {
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has a device id 0x%x",
				    __func__, pptr->path, tmp);
			}
		}
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to register device %s",
		    __func__, status, pptr->path);
		result = EIO;
		goto out;
	}
	pptr->device_id = tmp;
	pptr->valid_device_id = 1;
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT
	    " registered with device_id 0x%x (portid %d)", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);
out:
	return (result);
}

/*
 * Deregister a device (remove a device handle).
 * Called with PHY locked.
 */
void
pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	int result;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		return;
	}

	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		return;
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	COPY_MESSAGE(ptr, msg, 3);
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock across the wait, as in pmcs_register_device() */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		return;
	}
	status = LE_32(iomb[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: device %s deregistered", __func__, pptr->path);
		pptr->valid_device_id = 0;
		pptr->device_id = PMCS_INVALID_DEVICE_ID;
		pptr->configured = 0;
		pptr->deregister_wait = 0;
	}
}

/*
 * Deregister all registered devices.
 */
void
pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	/*
	 * Start at the maximum level and walk back to level 0. This only
	 * gets done during detach after all threads and timers have been
	 * destroyed, so there's no need to hold the softstate or PHY lock.
	 */
	while (phyp) {
		if (phyp->children) {
			/* Recurse depth-first so children go away first */
			pmcs_deregister_devices(pwp, phyp->children);
		}
		if (phyp->valid_device_id) {
			pmcs_deregister_device(pwp, phyp);
		}
		phyp = phyp->sibling;
	}
}

/*
 * Perform a 'soft' reset on the PMC chip
 */
int
pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart)
{
	uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode;
	pmcs_phy_t *pptr;
	char *msg = NULL;
	int i;

	/*
	 * Disable interrupts
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__);

	if (pwp->locks_initted) {
		mutex_enter(&pwp->lock);
	}
	pwp->blocked = 1;

	/*
	 * Clear our softstate copies of the MSGU and IOP heartbeats.
1461 */ 1462 pwp->last_msgu_tick = pwp->last_iop_tick = 0; 1463 1464 /* 1465 * Step 1 1466 */ 1467 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2); 1468 if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) { 1469 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE); 1470 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE); 1471 for (i = 0; i < 100; i++) { 1472 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1473 PMCS_MSGU_HOST_SOFT_RESET_READY; 1474 if (s2) { 1475 break; 1476 } 1477 drv_usecwait(10000); 1478 } 1479 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1480 PMCS_MSGU_HOST_SOFT_RESET_READY; 1481 if (s2 == 0) { 1482 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1483 "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came " 1484 "ready", __func__); 1485 pmcs_register_dump(pwp); 1486 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1487 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 || 1488 (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) & 1489 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) { 1490 pwp->state = STATE_DEAD; 1491 pwp->blocked = 0; 1492 if (pwp->locks_initted) { 1493 mutex_exit(&pwp->lock); 1494 } 1495 return (-1); 1496 } 1497 } 1498 } 1499 1500 /* 1501 * Step 2 1502 */ 1503 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0); 1504 drv_usecwait(10); 1505 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0); 1506 drv_usecwait(10); 1507 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0); 1508 drv_usecwait(10); 1509 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT, 1510 pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT)); 1511 drv_usecwait(10); 1512 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0); 1513 drv_usecwait(10); 1514 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT, 1515 pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT)); 1516 drv_usecwait(10); 1517 1518 sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1519 PMCS_MSGU_AAP_SFR_PROGRESS; 1520 sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS; 1521 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 " 1522 "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0), 1523 
HST_SFT_RESET_SIG); 1524 pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG); 1525 1526 /* 1527 * Step 3 1528 */ 1529 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET); 1530 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1531 gsm & ~PMCS_SOFT_RESET_BITS); 1532 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS); 1533 1534 /* 1535 * Step 4 1536 */ 1537 rapchk = pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN); 1538 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1539 "%08x -> %08x", rapchk, 0); 1540 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0); 1541 wapchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN); 1542 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1543 "%08x -> %08x", wapchk, 0); 1544 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0); 1545 wdpchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN); 1546 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1547 "%08x -> %08x", wdpchk, 0); 1548 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0); 1549 1550 /* 1551 * Step 5 1552 */ 1553 drv_usecwait(100); 1554 1555 /* 1556 * Step 5.5 (Temporary workaround for 1.07.xx Beta) 1557 */ 1558 tsmode = pmcs_rd_gsm_reg(pwp, 0, PMCS_GPIO_TRISTATE_MODE_ADDR); 1559 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x", 1560 tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1)); 1561 pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR, 1562 tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1)); 1563 drv_usecwait(10); 1564 1565 /* 1566 * Step 6 1567 */ 1568 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1569 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1570 spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1571 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1572 spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1573 drv_usecwait(10); 1574 1575 /* 1576 * Step 7 1577 */ 1578 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1579 
pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1580 spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1581 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB)); 1582 1583 /* 1584 * Step 8 1585 */ 1586 drv_usecwait(100); 1587 1588 /* 1589 * Step 9 1590 */ 1591 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1592 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1593 spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1594 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB)); 1595 1596 /* 1597 * Step 10 1598 */ 1599 drv_usecwait(100); 1600 1601 /* 1602 * Step 11 1603 */ 1604 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET); 1605 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm, 1606 gsm | PMCS_SOFT_RESET_BITS); 1607 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS); 1608 drv_usecwait(10); 1609 1610 /* 1611 * Step 12 1612 */ 1613 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN " 1614 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN), 1615 rapchk); 1616 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk); 1617 drv_usecwait(10); 1618 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN " 1619 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN), 1620 wapchk); 1621 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk); 1622 drv_usecwait(10); 1623 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN " 1624 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN), 1625 wapchk); 1626 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk); 1627 drv_usecwait(10); 1628 1629 /* 1630 * Step 13 1631 */ 1632 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET); 1633 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x", 1634 spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1635 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, 1636 spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB)); 1637 1638 /* 1639 * Step 14 1640 */ 1641 
drv_usecwait(100); 1642 1643 /* 1644 * Step 15 1645 */ 1646 for (spc = 0, i = 0; i < 1000; i++) { 1647 drv_usecwait(1000); 1648 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 1649 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) { 1650 break; 1651 } 1652 } 1653 1654 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) { 1655 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1656 "SFR didn't toggle (sfr 0x%x)", spc); 1657 pwp->state = STATE_DEAD; 1658 pwp->blocked = 0; 1659 if (pwp->locks_initted) { 1660 mutex_exit(&pwp->lock); 1661 } 1662 return (-1); 1663 } 1664 1665 /* 1666 * Step 16 1667 */ 1668 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1669 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1670 1671 /* 1672 * Wait for up to 5 seconds for AAP state to come either ready or error. 1673 */ 1674 for (i = 0; i < 50; i++) { 1675 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) & 1676 PMCS_MSGU_AAP_STATE_MASK; 1677 if (spc == PMCS_MSGU_AAP_STATE_ERROR || 1678 spc == PMCS_MSGU_AAP_STATE_READY) { 1679 break; 1680 } 1681 drv_usecwait(100000); 1682 } 1683 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1); 1684 if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) { 1685 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1686 "soft reset failed (state 0x%x)", spc); 1687 pwp->state = STATE_DEAD; 1688 pwp->blocked = 0; 1689 if (pwp->locks_initted) { 1690 mutex_exit(&pwp->lock); 1691 } 1692 return (-1); 1693 } 1694 1695 /* Clear the firmware log */ 1696 if (pwp->fwlogp) { 1697 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE); 1698 } 1699 1700 /* Reset our queue indices and entries */ 1701 bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi)); 1702 bzero(pwp->last_iqci, sizeof (pwp->last_iqci)); 1703 bzero(pwp->last_htag, sizeof (pwp->last_htag)); 1704 for (i = 0; i < PMCS_NIQ; i++) { 1705 if (pwp->iqp[i]) { 1706 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 1707 pmcs_wr_iqpi(pwp, i, 0); 1708 pmcs_wr_iqci(pwp, i, 0); 1709 } 1710 } 1711 for (i = 0; i < PMCS_NOQ; i++) { 1712 
if (pwp->oqp[i]) { 1713 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth); 1714 pmcs_wr_oqpi(pwp, i, 0); 1715 pmcs_wr_oqci(pwp, i, 0); 1716 } 1717 1718 } 1719 1720 if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING || 1721 pwp->state == STATE_PROBING || pwp->locks_initted == 0) { 1722 pwp->blocked = 0; 1723 if (pwp->locks_initted) { 1724 mutex_exit(&pwp->lock); 1725 } 1726 return (0); 1727 } 1728 1729 /* 1730 * Return at this point if we dont need to startup. 1731 */ 1732 if (no_restart) { 1733 return (0); 1734 } 1735 1736 ASSERT(pwp->locks_initted != 0); 1737 1738 /* 1739 * Flush the target queues and clear each target's PHY 1740 */ 1741 if (pwp->targets) { 1742 for (i = 0; i < pwp->max_dev; i++) { 1743 pmcs_xscsi_t *xp = pwp->targets[i]; 1744 1745 if (xp == NULL) { 1746 continue; 1747 } 1748 1749 mutex_enter(&xp->statlock); 1750 pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES); 1751 xp->phy = NULL; 1752 mutex_exit(&xp->statlock); 1753 } 1754 } 1755 1756 /* 1757 * Zero out the ports list, free non root phys, clear root phys 1758 */ 1759 bzero(pwp->ports, sizeof (pwp->ports)); 1760 pmcs_free_all_phys(pwp, pwp->root_phys); 1761 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 1762 pmcs_lock_phy(pptr); 1763 pmcs_clear_phy(pwp, pptr); 1764 pptr->target = NULL; 1765 pmcs_unlock_phy(pptr); 1766 } 1767 1768 /* 1769 * Restore Interrupt Mask 1770 */ 1771 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask); 1772 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 1773 1774 pwp->mpi_table_setup = 0; 1775 mutex_exit(&pwp->lock); 1776 1777 /* 1778 * Set up MPI again. 
1779 */ 1780 if (pmcs_setup(pwp)) { 1781 msg = "unable to setup MPI tables again"; 1782 goto fail_restart; 1783 } 1784 pmcs_report_fwversion(pwp); 1785 1786 /* 1787 * Restart MPI 1788 */ 1789 if (pmcs_start_mpi(pwp)) { 1790 msg = "unable to restart MPI again"; 1791 goto fail_restart; 1792 } 1793 1794 mutex_enter(&pwp->lock); 1795 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1796 mutex_exit(&pwp->lock); 1797 1798 /* 1799 * Run any completions 1800 */ 1801 PMCS_CQ_RUN(pwp); 1802 1803 /* 1804 * Delay 1805 */ 1806 drv_usecwait(1000000); 1807 return (0); 1808 1809 fail_restart: 1810 mutex_enter(&pwp->lock); 1811 pwp->state = STATE_DEAD; 1812 mutex_exit(&pwp->lock); 1813 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 1814 "%s: Failed: %s", __func__, msg); 1815 return (-1); 1816 } 1817 1818 1819 /* 1820 * Perform a 'hot' reset, which will soft reset the chip and 1821 * restore the state back to pre-reset context. Called with pwp 1822 * lock held. 1823 */ 1824 int 1825 pmcs_hot_reset(pmcs_hw_t *pwp) 1826 { 1827 pmcs_iport_t *iport; 1828 1829 ASSERT(mutex_owned(&pwp->lock)); 1830 pwp->state = STATE_IN_RESET; 1831 1832 /* 1833 * For any iports on this HBA, report empty target sets and 1834 * then tear them down. 
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		/* Report an empty target set so consumers see all tgts gone */
		(void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap);
		(void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0);
		pmcs_iport_teardown_phys(iport);
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	/* Grab a register dump, in the event that reset fails */
	pmcs_register_dump_int(pwp);
	mutex_exit(&pwp->lock);

	/* Ensure discovery is not running before we proceed */
	mutex_enter(&pwp->config_lock);
	while (pwp->configuring) {
		cv_wait(&pwp->config_cv, &pwp->config_lock);
	}
	mutex_exit(&pwp->config_lock);

	/* Issue soft reset and clean up related softstate */
	if (pmcs_soft_reset(pwp, B_FALSE)) {
		/*
		 * Disable interrupts, in case we got far enough along to
		 * enable them, then fire off ereport and service impact.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed soft reset", __func__);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		/* Re-acquire pwp->lock: caller expects it held on return */
		mutex_enter(&pwp->lock);
		pwp->state = STATE_DEAD;
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);
	pwp->state = STATE_RUNNING;
	mutex_exit(&pwp->lock);

	/*
	 * Finally, restart the phys, which will bring the iports back
	 * up and eventually result in discovery running.
	 */
	if (pmcs_start_phys(pwp)) {
		/* We should be up and running now, so retry */
		if (pmcs_start_phys(pwp)) {
			/* Apparently unable to restart PHYs, fail */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: failed to restart PHYs after soft reset",
			    __func__);
			mutex_enter(&pwp->lock);
			return (DDI_FAILURE);
		}
	}

	mutex_enter(&pwp->lock);
	return (DDI_SUCCESS);
}

/*
 * Reset a device or a logical unit.
 */
int
pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
{
	int rval = 0;

	if (pptr == NULL) {
		return (ENXIO);
	}

	pmcs_lock_phy(pptr);
	if (pptr->dtype == SAS) {
		/*
		 * Some devices do not support SAS_I_T_NEXUS_RESET as
		 * it is not a mandatory (in SAM4) task management
		 * function, while LOGIC_UNIT_RESET is mandatory.
		 *
		 * The problem here is that we need to iterate over
		 * all known LUNs to emulate the semantics of
		 * "RESET_TARGET".
		 *
		 * XXX: FIX ME
		 */
		if (lun == (uint64_t)-1) {
			lun = 0;
		}
		rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
		    NULL);
	} else if (pptr->dtype == SATA) {
		/* SATA has no LUNs; only LUN 0 (i.e. whole device) is valid */
		if (lun != 0ull) {
			pmcs_unlock_phy(pptr);
			return (EINVAL);
		}
		rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
	} else {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: cannot reset a SMP device yet (%s)",
		    __func__, pptr->path);
		return (EINVAL);
	}

	/*
	 * Now harvest any commands killed by this action
	 * by issuing an ABORT for all commands on this device.
	 *
	 * We do this even if the tmf or reset fails (in case there
	 * are any dead commands around to be harvested *anyway*).
	 * We don't have to wait for the abort to complete.
	 */
	if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
		/* Abort couldn't be issued now; let the worker retry */
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
	}

	pmcs_unlock_phy(pptr);
	return (rval);
}

/*
 * Called with PHY locked.
 * Ensure the PHY has a valid firmware device handle, registering the
 * device if necessary. Returns 0 on success, -1 if registration failed
 * or needs to be retried via rescheduled discovery.
 */
static int
pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	if (pptr->valid_device_id == 0) {
		int result = pmcs_register_device(pwp, pptr);

		/*
		 * If we changed while registering, punt
		 */
		if (pptr->changed) {
			RESTART_DISCOVERY(pwp);
			return (-1);
		}

		/*
		 * If we had a failure to register, check against errors.
		 * An ENOMEM error means we just retry (temp resource shortage).
		 */
		if (result == ENOMEM) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
			return (-1);
		}

		/*
		 * An ETIMEDOUT error means we retry (if our counter isn't
		 * exhausted)
		 */
		if (result == ETIMEDOUT) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pptr->config_stop = 0;
				pmcs_kill_changed(pwp, pptr, 0);
			}
			return (-1);
		}
		/*
		 * Other errors or no valid device id is fatal, but don't
		 * preclude a future action.
		 */
		if (result || pptr->valid_device_id == 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: %s could not be registered", __func__,
			    pptr->path);
			return (-1);
		}
	}
	return (0);
}

/*
 * Create the SCSI target map for an iport.
 * Returns B_TRUE on success, B_FALSE on failure.
 */
int
pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
{
	ASSERT(iport);
	if (iport == NULL)
		return (B_FALSE);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

	/* create target map */
	if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET,
	    tgtmap_csync_usec, tgtmap_stable_usec, (void *)iport,
	    pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb,
	    &iport->iss_tgtmap) != DDI_SUCCESS) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed to create tgtmap", __func__);
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Destroy the SCSI target map for an iport.
 * Returns B_TRUE on success, B_FALSE on failure.
 */
int
pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
{
	ASSERT(iport && iport->iss_tgtmap);
	if ((iport == NULL) || (iport->iss_tgtmap == NULL))
		return (B_FALSE);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

	/* destroy target map */
	scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
	return (B_TRUE);
}

/*
 * Remove all phys from an iport's phymap and empty its phylist.
 * Called when a port has been reset by the host (see pmcs_intr.c)
 * or prior to issuing a soft reset if we detect a stall on the chip
 * (see pmcs_attach.c).
 */
void
pmcs_iport_teardown_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	sas_phymap_phys_t *phys;
	int phynum;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);

	/*
	 * Remove all phys from the iport handle's phy list, unset its
	 * primary phy and update its state.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	iport->pptr = NULL;
	iport->ua_state = UA_PEND_DEACTIVATE;

	/* Remove all phys from the phymap */
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	if (phys) {
		while ((phynum = sas_phymap_phys_next(phys)) != -1) {
			(void) sas_phymap_phy_rem(pwp->hss_phymap, phynum);
		}
		sas_phymap_phys_free(phys);
	}
}

/*
 * Query the phymap and populate the iport handle passed in.
 * Called with iport lock held.
 */
int
pmcs_iport_configure_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	sas_phymap_phys_t *phys;
	int phynum;
	int inst;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);
	inst = ddi_get_instance(iport->dip);

	mutex_enter(&pwp->lock);
	ASSERT(pwp->root_phys != NULL);

	/*
	 * Query the phymap regarding the phys in this iport and populate
	 * the iport's phys list. Hereafter this list is maintained via
	 * port up and down events in pmcs_intr.c
	 */
	ASSERT(list_is_empty(&iport->phys));
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	ASSERT(phys != NULL);
	while ((phynum = sas_phymap_phys_next(phys)) != -1) {
		/* Grab the phy pointer from root_phys (indexed by phynum) */
		pptr = pwp->root_phys + phynum;
		ASSERT(pptr);
		pmcs_lock_phy(pptr);
		ASSERT(pptr->phynum == phynum);

		/*
		 * Set a back pointer in the phy to this iport.
		 */
		pptr->iport = iport;

		/*
		 * If this phy is the primary, set a pointer to it on our
		 * iport handle, and set our portid from it.
		 */
		if (!pptr->subsidiary) {
			iport->pptr = pptr;
			iport->portid = pptr->portid;
		}

		/*
		 * Finally, insert the phy into our list
		 */
		pmcs_unlock_phy(pptr);
		pmcs_add_phy_to_iport(iport, pptr);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found "
		    "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum,
		    (void *)pptr, inst, iport->refcnt);
	}
	mutex_exit(&pwp->lock);
	sas_phymap_phys_free(phys);
	RESTART_DISCOVERY(pwp);
	return (DDI_SUCCESS);
}

/*
 * Return the iport that ua is associated with, or NULL. If an iport is
 * returned, it will be held and the caller must release the hold.
 */
static pmcs_iport_t *
pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
{
	pmcs_iport_t *iport = NULL;

	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports);
	    iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		if (strcmp(iport->ua, ua) == 0) {
			mutex_exit(&iport->lock);
			/* Found it - take a hold on behalf of the caller */
			mutex_enter(&iport->refcnt_lock);
			iport->refcnt++;
			mutex_exit(&iport->refcnt_lock);
			break;
		}
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	return (iport);
}

/*
 * Return the iport that pptr is associated with, or NULL.
 * If an iport is returned, there is a hold that the caller must release.
 */
pmcs_iport_t *
pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
	pmcs_iport_t *iport = NULL;
	char *ua;

	/* Map the wwn to a unit address, then look up the iport by ua */
	ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
	if (ua) {
		iport = pmcs_get_iport_by_ua(pwp, ua);
		if (iport) {
			mutex_enter(&iport->lock);
			pmcs_iport_active(iport);
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
			    "found iport [0x%p] on ua (%s), refcnt (%d)",
			    __func__, (void *)iport, ua, iport->refcnt);
			mutex_exit(&iport->lock);
		}
	}

	return (iport);
}

/*
 * Promote the next phy on this port to primary, and return it.
 * Called when the primary PHY on a port is going down, but the port
 * remains up (see pmcs_intr.c).
 */
pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
{
	pmcs_hw_t *pwp;
	pmcs_iport_t *iport;
	pmcs_phy_t *pptr, *child;
	int portid;

	pmcs_lock_phy(prev_primary);
	portid = prev_primary->portid;
	iport = prev_primary->iport;
	pwp = prev_primary->pwp;

	/* Use the first available phy in this port */
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		if ((pptr->portid == portid) && (pptr != prev_primary)) {
			mutex_enter(&pptr->phy_lock);
			break;
		}
	}

	if (pptr == NULL) {
		/* No other phy on this port; nothing to promote */
		pmcs_unlock_phy(prev_primary);
		return (NULL);
	}

	if (iport) {
		mutex_enter(&iport->lock);
		iport->pptr = pptr;
		mutex_exit(&iport->lock);
	}

	/* Update the phy handle with the data from the previous primary */
	pptr->children = prev_primary->children;
	child = pptr->children;
	while (child) {
		/* Re-parent all children onto the new primary */
		child->parent = pptr;
		child = child->sibling;
	}
	pptr->ncphy = prev_primary->ncphy;
	pptr->width = prev_primary->width;
	pptr->dtype = prev_primary->dtype;
	pptr->pend_dtype = prev_primary->pend_dtype;
	pptr->tolerates_sas2 = prev_primary->tolerates_sas2;
	pptr->atdt = prev_primary->atdt;
	pptr->portid = prev_primary->portid;
	pptr->link_rate = prev_primary->link_rate;
	pptr->configured = prev_primary->configured;
	pptr->iport = prev_primary->iport;
	pptr->target = prev_primary->target;
	if (pptr->target) {
		pptr->target->phy = pptr;
	}

	/* Update the phy mask properties for the affected PHYs */
	/* Clear the current values... */
	pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
	    pptr->tgt_port_pm_tmp, B_FALSE);
	/* ...replace with the values from prev_primary... */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_TRUE);
	/* ...then clear prev_primary's PHY values from the new primary */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm,
	    prev_primary->tgt_port_pm, B_FALSE);
	/* Clear the prev_primary's values */
	pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_FALSE);

	pptr->subsidiary = 0;

	prev_primary->subsidiary = 1;
	prev_primary->children = NULL;
	prev_primary->target = NULL;
	pptr->device_id = prev_primary->device_id;
	pptr->valid_device_id = 1;
	pmcs_unlock_phy(prev_primary);

	/*
	 * We call pmcs_unlock_phy() on pptr because it now contains the
	 * list of children.
	 */
	pmcs_unlock_phy(pptr);

	return (pptr);
}

void
pmcs_rele_iport(pmcs_iport_t *iport)
{
	/*
	 * Release a refcnt on this iport. If this is the last reference,
	 * signal the potential waiter in pmcs_iport_unattach().
2307 */ 2308 ASSERT(iport->refcnt > 0); 2309 mutex_enter(&iport->refcnt_lock); 2310 iport->refcnt--; 2311 mutex_exit(&iport->refcnt_lock); 2312 if (iport->refcnt == 0) { 2313 cv_signal(&iport->refcnt_cv); 2314 } 2315 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: iport " 2316 "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt); 2317 } 2318 2319 void 2320 pmcs_phymap_activate(void *arg, char *ua, void **privp) 2321 { 2322 _NOTE(ARGUNUSED(privp)); 2323 pmcs_hw_t *pwp = arg; 2324 pmcs_iport_t *iport = NULL; 2325 2326 mutex_enter(&pwp->lock); 2327 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD) || 2328 (pwp->state == STATE_IN_RESET)) { 2329 mutex_exit(&pwp->lock); 2330 return; 2331 } 2332 pwp->phymap_active++; 2333 mutex_exit(&pwp->lock); 2334 2335 if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) != 2336 DDI_SUCCESS) { 2337 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to " 2338 "add iport handle on unit address [%s]", __func__, ua); 2339 } else { 2340 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: " 2341 "phymap_active count (%d), added iport handle on unit " 2342 "address [%s]", __func__, pwp->phymap_active, ua); 2343 } 2344 2345 /* Set the HBA softstate as our private data for this unit address */ 2346 *privp = (void *)pwp; 2347 2348 /* 2349 * We are waiting on attach for this iport node, unless it is still 2350 * attached. This can happen if a consumer has an outstanding open 2351 * on our iport node, but the port is down. If this is the case, we 2352 * need to configure our iport here for reuse. 
2353 */ 2354 iport = pmcs_get_iport_by_ua(pwp, ua); 2355 if (iport) { 2356 mutex_enter(&iport->lock); 2357 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) { 2358 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: " 2359 "failed to configure phys on iport [0x%p] at " 2360 "unit address (%s)", __func__, (void *)iport, ua); 2361 } 2362 pmcs_iport_active(iport); 2363 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 2364 &iport->nphy); 2365 mutex_exit(&iport->lock); 2366 pmcs_rele_iport(iport); 2367 } 2368 2369 } 2370 2371 void 2372 pmcs_phymap_deactivate(void *arg, char *ua, void *privp) 2373 { 2374 _NOTE(ARGUNUSED(privp)); 2375 pmcs_hw_t *pwp = arg; 2376 pmcs_iport_t *iport; 2377 2378 mutex_enter(&pwp->lock); 2379 pwp->phymap_active--; 2380 mutex_exit(&pwp->lock); 2381 2382 if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) != 2383 DDI_SUCCESS) { 2384 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to " 2385 "remove iport handle on unit address [%s]", __func__, ua); 2386 } else { 2387 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: " 2388 "phymap_active count (%d), removed iport handle on unit " 2389 "address [%s]", __func__, pwp->phymap_active, ua); 2390 } 2391 2392 iport = pmcs_get_iport_by_ua(pwp, ua); 2393 2394 if (iport == NULL) { 2395 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed " 2396 "lookup of iport handle on unit addr (%s)", __func__, ua); 2397 return; 2398 } 2399 2400 mutex_enter(&iport->lock); 2401 iport->ua_state = UA_INACTIVE; 2402 iport->portid = PMCS_IPORT_INVALID_PORT_ID; 2403 pmcs_remove_phy_from_iport(iport, NULL); 2404 mutex_exit(&iport->lock); 2405 pmcs_rele_iport(iport); 2406 } 2407 2408 /* 2409 * Top-level discovery function 2410 */ 2411 void 2412 pmcs_discover(pmcs_hw_t *pwp) 2413 { 2414 pmcs_phy_t *pptr; 2415 pmcs_phy_t *root_phy; 2416 2417 DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags, 2418 boolean_t, pwp->config_changed); 2419 2420 mutex_enter(&pwp->lock); 2421 2422 
if (pwp->state != STATE_RUNNING) { 2423 mutex_exit(&pwp->lock); 2424 return; 2425 } 2426 2427 /* Ensure we have at least one phymap active */ 2428 if (pwp->phymap_active == 0) { 2429 mutex_exit(&pwp->lock); 2430 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2431 "%s: phymap inactive, exiting", __func__); 2432 return; 2433 } 2434 2435 mutex_exit(&pwp->lock); 2436 2437 /* 2438 * If no iports have attached, but we have PHYs that are up, we 2439 * are waiting for iport attach to complete. Restart discovery. 2440 */ 2441 rw_enter(&pwp->iports_lock, RW_READER); 2442 if (!pwp->iports_attached) { 2443 rw_exit(&pwp->iports_lock); 2444 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2445 "%s: no iports attached, retry discovery", __func__); 2446 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER); 2447 return; 2448 } 2449 rw_exit(&pwp->iports_lock); 2450 2451 mutex_enter(&pwp->config_lock); 2452 if (pwp->configuring) { 2453 mutex_exit(&pwp->config_lock); 2454 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2455 "%s: configuration already in progress", __func__); 2456 return; 2457 } 2458 2459 if (pmcs_acquire_scratch(pwp, B_FALSE)) { 2460 mutex_exit(&pwp->config_lock); 2461 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2462 "%s: cannot allocate scratch", __func__); 2463 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER); 2464 return; 2465 } 2466 2467 pwp->configuring = 1; 2468 pwp->config_changed = B_FALSE; 2469 mutex_exit(&pwp->config_lock); 2470 2471 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin"); 2472 2473 /* 2474 * First, tell SCSA that we're beginning set operations. 2475 */ 2476 pmcs_begin_observations(pwp); 2477 2478 /* 2479 * The order of the following traversals is important. 2480 * 2481 * The first one checks for changed expanders. 2482 * 2483 * The second one aborts commands for dead devices and deregisters them. 
2484 * 2485 * The third one clears the contents of dead expanders from the tree 2486 * 2487 * The fourth one clears now dead devices in expanders that remain. 2488 */ 2489 2490 /* 2491 * 1. Check expanders marked changed (but not dead) to see if they still 2492 * have the same number of phys and the same SAS address. Mark them, 2493 * their subsidiary phys (if wide) and their descendents dead if 2494 * anything has changed. Check the devices they contain to see if 2495 * *they* have changed. If they've changed from type NOTHING we leave 2496 * them marked changed to be configured later (picking up a new SAS 2497 * address and link rate if possible). Otherwise, any change in type, 2498 * SAS address or removal of target role will cause us to mark them 2499 * (and their descendents) as dead (and cause any pending commands 2500 * and associated devices to be removed). 2501 * 2502 * NOTE: We don't want to bail on discovery if the config has 2503 * changed until *after* we run pmcs_kill_devices. 2504 */ 2505 root_phy = pwp->root_phys; 2506 pmcs_check_expanders(pwp, root_phy); 2507 2508 /* 2509 * 2. Descend the tree looking for dead devices and kill them 2510 * by aborting all active commands and then deregistering them. 2511 */ 2512 if (pmcs_kill_devices(pwp, root_phy)) { 2513 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2514 "%s: pmcs_kill_devices failed!", __func__); 2515 } 2516 2517 /* 2518 * 3. Check for dead expanders and remove their children from the tree. 2519 * By the time we get here, the devices and commands for them have 2520 * already been terminated and removed. 2521 * 2522 * We do this independent of the configuration count changing so we can 2523 * free any dead device PHYs that were discovered while checking 2524 * expanders. We ignore any subsidiary phys as pmcs_clear_expander 2525 * will take care of those. 
2526 * 2527 * NOTE: pmcs_clear_expander requires softstate lock 2528 */ 2529 mutex_enter(&pwp->lock); 2530 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 2531 /* 2532 * Call pmcs_clear_expander for every root PHY. It will 2533 * recurse and determine which (if any) expanders actually 2534 * need to be cleared. 2535 */ 2536 pmcs_lock_phy(pptr); 2537 pmcs_clear_expander(pwp, pptr, 0); 2538 pmcs_unlock_phy(pptr); 2539 } 2540 mutex_exit(&pwp->lock); 2541 2542 /* 2543 * 4. Check for dead devices and nullify them. By the time we get here, 2544 * the devices and commands for them have already been terminated 2545 * and removed. This is different from step 2 in that this just nulls 2546 * phys that are part of expanders that are still here but used to 2547 * be something but are no longer something (e.g., after a pulled 2548 * disk drive). Note that dead expanders had their contained phys 2549 * removed from the tree- here, the expanders themselves are 2550 * nullified (unless they were removed by being contained in another 2551 * expander phy). 2552 */ 2553 pmcs_clear_phys(pwp, root_phy); 2554 2555 /* 2556 * 5. Now check for and configure new devices. 2557 */ 2558 if (pmcs_configure_new_devices(pwp, root_phy)) { 2559 goto restart; 2560 } 2561 2562 out: 2563 DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags, 2564 boolean_t, pwp->config_changed); 2565 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end"); 2566 2567 mutex_enter(&pwp->config_lock); 2568 2569 if (pwp->config_changed == B_FALSE) { 2570 /* 2571 * Observation is stable, report what we currently see to 2572 * the tgtmaps for delta processing. Start by setting 2573 * BEGIN on all tgtmaps. 2574 */ 2575 mutex_exit(&pwp->config_lock); 2576 if (pmcs_report_observations(pwp) == B_FALSE) { 2577 goto restart; 2578 } 2579 mutex_enter(&pwp->config_lock); 2580 } else { 2581 /* 2582 * If config_changed is TRUE, we need to reschedule 2583 * discovery now. 
2584 */ 2585 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2586 "%s: Config has changed, will re-run discovery", __func__); 2587 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER); 2588 } 2589 2590 pmcs_release_scratch(pwp); 2591 if (!pwp->quiesced) { 2592 pwp->blocked = 0; 2593 } 2594 pwp->configuring = 0; 2595 cv_signal(&pwp->config_cv); 2596 mutex_exit(&pwp->config_lock); 2597 2598 #ifdef DEBUG 2599 pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys); 2600 if (pptr != NULL) { 2601 if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) { 2602 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 2603 "PHY %s dead=%d changed=%d configured=%d " 2604 "but no work scheduled", pptr->path, pptr->dead, 2605 pptr->changed, pptr->configured); 2606 } 2607 pmcs_unlock_phy(pptr); 2608 } 2609 #endif 2610 2611 return; 2612 2613 restart: 2614 /* Clean up and restart discovery */ 2615 pmcs_release_scratch(pwp); 2616 pmcs_flush_observations(pwp); 2617 mutex_enter(&pwp->config_lock); 2618 pwp->configuring = 0; 2619 cv_signal(&pwp->config_cv); 2620 RESTART_DISCOVERY_LOCKED(pwp); 2621 mutex_exit(&pwp->config_lock); 2622 } 2623 2624 /* 2625 * Return any PHY that needs to have scheduled work done. The PHY is returned 2626 * locked. 2627 */ 2628 static pmcs_phy_t * 2629 pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2630 { 2631 pmcs_phy_t *cphyp, *pnext; 2632 2633 while (pptr) { 2634 pmcs_lock_phy(pptr); 2635 2636 if (pptr->changed || (pptr->dead && pptr->valid_device_id)) { 2637 return (pptr); 2638 } 2639 2640 pnext = pptr->sibling; 2641 2642 if (pptr->children) { 2643 cphyp = pptr->children; 2644 pmcs_unlock_phy(pptr); 2645 cphyp = pmcs_find_phy_needing_work(pwp, cphyp); 2646 if (cphyp) { 2647 return (cphyp); 2648 } 2649 } else { 2650 pmcs_unlock_phy(pptr); 2651 } 2652 2653 pptr = pnext; 2654 } 2655 2656 return (NULL); 2657 } 2658 2659 /* 2660 * We may (or may not) report observations to SCSA. This is prefaced by 2661 * issuing a set_begin for each iport target map. 
2662 */ 2663 static void 2664 pmcs_begin_observations(pmcs_hw_t *pwp) 2665 { 2666 pmcs_iport_t *iport; 2667 scsi_hba_tgtmap_t *tgtmap; 2668 2669 rw_enter(&pwp->iports_lock, RW_READER); 2670 for (iport = list_head(&pwp->iports); iport != NULL; 2671 iport = list_next(&pwp->iports, iport)) { 2672 /* 2673 * Unless we have at least one phy up, skip this iport. 2674 * Note we don't need to lock the iport for report_skip 2675 * since it is only used here. We are doing the skip so that 2676 * the phymap and iportmap stabilization times are honored - 2677 * giving us the ability to recover port operation within the 2678 * stabilization time without unconfiguring targets using the 2679 * port. 2680 */ 2681 if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) { 2682 iport->report_skip = 1; 2683 continue; /* skip set_begin */ 2684 } 2685 iport->report_skip = 0; 2686 2687 tgtmap = iport->iss_tgtmap; 2688 ASSERT(tgtmap); 2689 if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) { 2690 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2691 "%s: cannot set_begin tgtmap ", __func__); 2692 rw_exit(&pwp->iports_lock); 2693 return; 2694 } 2695 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2696 "%s: set begin on tgtmap [0x%p]", __func__, (void *)tgtmap); 2697 } 2698 rw_exit(&pwp->iports_lock); 2699 } 2700 2701 /* 2702 * Tell SCSA to flush the observations we've already sent (if any), as they 2703 * are no longer valid. 2704 */ 2705 static void 2706 pmcs_flush_observations(pmcs_hw_t *pwp) 2707 { 2708 pmcs_iport_t *iport; 2709 scsi_hba_tgtmap_t *tgtmap; 2710 2711 rw_enter(&pwp->iports_lock, RW_READER); 2712 for (iport = list_head(&pwp->iports); iport != NULL; 2713 iport = list_next(&pwp->iports, iport)) { 2714 /* 2715 * Skip this iport if it has no PHYs up. 
2716 */ 2717 if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) { 2718 continue; 2719 } 2720 2721 tgtmap = iport->iss_tgtmap; 2722 ASSERT(tgtmap); 2723 if (scsi_hba_tgtmap_set_flush(tgtmap) != DDI_SUCCESS) { 2724 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2725 "%s: Failed set_flush on tgtmap 0x%p", __func__, 2726 (void *)tgtmap); 2727 } else { 2728 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2729 "%s: set flush on tgtmap 0x%p", __func__, 2730 (void *)tgtmap); 2731 } 2732 } 2733 rw_exit(&pwp->iports_lock); 2734 } 2735 2736 /* 2737 * Report current observations to SCSA. 2738 */ 2739 static boolean_t 2740 pmcs_report_observations(pmcs_hw_t *pwp) 2741 { 2742 pmcs_iport_t *iport; 2743 scsi_hba_tgtmap_t *tgtmap; 2744 char *ap; 2745 pmcs_phy_t *pptr; 2746 uint64_t wwn; 2747 2748 /* 2749 * Observation is stable, report what we currently see to the tgtmaps 2750 * for delta processing. 2751 */ 2752 pptr = pwp->root_phys; 2753 2754 while (pptr) { 2755 pmcs_lock_phy(pptr); 2756 2757 /* 2758 * Skip PHYs that have nothing attached or are dead. 
2759 */ 2760 if ((pptr->dtype == NOTHING) || pptr->dead) { 2761 pmcs_unlock_phy(pptr); 2762 pptr = pptr->sibling; 2763 continue; 2764 } 2765 2766 if (pptr->changed) { 2767 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 2768 "%s: oops, PHY %s changed; restart discovery", 2769 __func__, pptr->path); 2770 pmcs_unlock_phy(pptr); 2771 return (B_FALSE); 2772 } 2773 2774 /* 2775 * Get the iport for this root PHY, then call the helper 2776 * to report observations for this iport's targets 2777 */ 2778 wwn = pmcs_barray2wwn(pptr->sas_address); 2779 pmcs_unlock_phy(pptr); 2780 iport = pmcs_get_iport_by_wwn(pwp, wwn); 2781 if (iport == NULL) { 2782 /* No iport for this tgt */ 2783 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 2784 "%s: no iport for this target", __func__); 2785 pptr = pptr->sibling; 2786 continue; 2787 } 2788 2789 pmcs_lock_phy(pptr); 2790 if (!iport->report_skip) { 2791 if (pmcs_report_iport_observations( 2792 pwp, iport, pptr) == B_FALSE) { 2793 pmcs_rele_iport(iport); 2794 pmcs_unlock_phy(pptr); 2795 return (B_FALSE); 2796 } 2797 } 2798 pmcs_rele_iport(iport); 2799 pmcs_unlock_phy(pptr); 2800 pptr = pptr->sibling; 2801 } 2802 2803 /* 2804 * The observation is complete, end sets. Note we will skip any 2805 * iports that are active, but have no PHYs in them (i.e. awaiting 2806 * unconfigure). Set to restart discovery if we find this. 
2807 */ 2808 rw_enter(&pwp->iports_lock, RW_READER); 2809 for (iport = list_head(&pwp->iports); 2810 iport != NULL; 2811 iport = list_next(&pwp->iports, iport)) { 2812 2813 if (iport->report_skip) 2814 continue; /* skip set_end */ 2815 2816 tgtmap = iport->iss_tgtmap; 2817 ASSERT(tgtmap); 2818 if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) { 2819 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2820 "%s: cannot set_end tgtmap ", __func__); 2821 rw_exit(&pwp->iports_lock); 2822 return (B_FALSE); 2823 } 2824 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2825 "%s: set end on tgtmap [0x%p]", __func__, (void *)tgtmap); 2826 } 2827 2828 /* 2829 * Now that discovery is complete, set up the necessary 2830 * DDI properties on each iport node. 2831 */ 2832 for (iport = list_head(&pwp->iports); iport != NULL; 2833 iport = list_next(&pwp->iports, iport)) { 2834 /* Set up the 'attached-port' property on the iport */ 2835 ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP); 2836 mutex_enter(&iport->lock); 2837 pptr = iport->pptr; 2838 mutex_exit(&iport->lock); 2839 if (pptr == NULL) { 2840 /* 2841 * This iport is down, but has not been 2842 * removed from our list (unconfigured). 2843 * Set our value to '0'. 
2844 */ 2845 (void) snprintf(ap, 1, "%s", "0"); 2846 } else { 2847 /* Otherwise, set it to remote phy's wwn */ 2848 pmcs_lock_phy(pptr); 2849 wwn = pmcs_barray2wwn(pptr->sas_address); 2850 (void) scsi_wwn_to_wwnstr(wwn, 1, ap); 2851 pmcs_unlock_phy(pptr); 2852 } 2853 if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip, 2854 SCSI_ADDR_PROP_ATTACHED_PORT, ap) != DDI_SUCCESS) { 2855 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed " 2856 "to set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", 2857 __func__); 2858 } 2859 kmem_free(ap, PMCS_MAX_UA_SIZE); 2860 } 2861 rw_exit(&pwp->iports_lock); 2862 2863 return (B_TRUE); 2864 } 2865 2866 /* 2867 * Report observations into a particular iport's target map 2868 * 2869 * Called with phyp (and all descendents) locked 2870 */ 2871 static boolean_t 2872 pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport, 2873 pmcs_phy_t *phyp) 2874 { 2875 pmcs_phy_t *lphyp; 2876 scsi_hba_tgtmap_t *tgtmap; 2877 scsi_tgtmap_tgt_type_t tgt_type; 2878 char *ua; 2879 uint64_t wwn; 2880 2881 tgtmap = iport->iss_tgtmap; 2882 ASSERT(tgtmap); 2883 2884 lphyp = phyp; 2885 while (lphyp) { 2886 switch (lphyp->dtype) { 2887 default: /* Skip unknown PHYs. 
*/ 2888 /* for non-root phys, skip to sibling */ 2889 goto next_phy; 2890 2891 case SATA: 2892 case SAS: 2893 tgt_type = SCSI_TGT_SCSI_DEVICE; 2894 break; 2895 2896 case EXPANDER: 2897 tgt_type = SCSI_TGT_SMP_DEVICE; 2898 break; 2899 } 2900 2901 if (lphyp->dead || !lphyp->configured) { 2902 goto next_phy; 2903 } 2904 2905 /* 2906 * Validate the PHY's SAS address 2907 */ 2908 if (((lphyp->sas_address[0] & 0xf0) >> 4) != NAA_IEEE_REG) { 2909 pmcs_prt(pwp, PMCS_PRT_ERR, lphyp, NULL, 2910 "PHY 0x%p (%s) has invalid SAS address; " 2911 "will not enumerate", (void *)lphyp, lphyp->path); 2912 goto next_phy; 2913 } 2914 2915 wwn = pmcs_barray2wwn(lphyp->sas_address); 2916 ua = scsi_wwn_to_wwnstr(wwn, 1, NULL); 2917 2918 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, lphyp, NULL, 2919 "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]", 2920 ua, (void *)tgtmap, (void*)lphyp); 2921 2922 if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) != 2923 DDI_SUCCESS) { 2924 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, 2925 "%s: failed to add address %s", __func__, ua); 2926 scsi_free_wwnstr(ua); 2927 return (B_FALSE); 2928 } 2929 scsi_free_wwnstr(ua); 2930 2931 if (lphyp->children) { 2932 if (pmcs_report_iport_observations(pwp, iport, 2933 lphyp->children) == B_FALSE) { 2934 return (B_FALSE); 2935 } 2936 } 2937 2938 /* for non-root phys, report siblings too */ 2939 next_phy: 2940 if (IS_ROOT_PHY(lphyp)) { 2941 lphyp = NULL; 2942 } else { 2943 lphyp = lphyp->sibling; 2944 } 2945 } 2946 2947 return (B_TRUE); 2948 } 2949 2950 /* 2951 * Check for and configure new devices. 2952 * 2953 * If the changed device is a SATA device, add a SATA device. 2954 * 2955 * If the changed device is a SAS device, add a SAS device. 2956 * 2957 * If the changed device is an EXPANDER device, do a REPORT 2958 * GENERAL SMP command to find out the number of contained phys. 
2959 * 2960 * For each number of contained phys, allocate a phy, do a 2961 * DISCOVERY SMP command to find out what kind of device it 2962 * is and add it to the linked list of phys on the *next* level. 2963 * 2964 * NOTE: pptr passed in by the caller will be a root PHY 2965 */ 2966 static int 2967 pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2968 { 2969 int rval = 0; 2970 pmcs_iport_t *iport; 2971 pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild; 2972 uint64_t wwn; 2973 2974 /* 2975 * First, walk through each PHY at this level 2976 */ 2977 while (pptr) { 2978 pmcs_lock_phy(pptr); 2979 pnext = pptr->sibling; 2980 2981 /* 2982 * Set the new dtype if it has changed 2983 */ 2984 if ((pptr->pend_dtype != NEW) && 2985 (pptr->pend_dtype != pptr->dtype)) { 2986 pptr->dtype = pptr->pend_dtype; 2987 } 2988 2989 if (pptr->changed == 0 || pptr->dead || pptr->configured) { 2990 goto next_phy; 2991 } 2992 2993 /* 2994 * Confirm that this target's iport is configured 2995 */ 2996 root_phy = pmcs_get_root_phy(pptr); 2997 wwn = pmcs_barray2wwn(root_phy->sas_address); 2998 pmcs_unlock_phy(pptr); 2999 iport = pmcs_get_iport_by_wwn(pwp, wwn); 3000 if (iport == NULL) { 3001 /* No iport for this tgt, restart */ 3002 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 3003 "%s: iport not yet configured, " 3004 "retry discovery", __func__); 3005 pnext = NULL; 3006 rval = -1; 3007 pmcs_lock_phy(pptr); 3008 goto next_phy; 3009 } 3010 3011 pmcs_lock_phy(pptr); 3012 switch (pptr->dtype) { 3013 case NOTHING: 3014 pptr->changed = 0; 3015 break; 3016 case SATA: 3017 case SAS: 3018 pptr->iport = iport; 3019 pmcs_new_tport(pwp, pptr); 3020 break; 3021 case EXPANDER: 3022 pmcs_configure_expander(pwp, pptr, iport); 3023 break; 3024 } 3025 pmcs_rele_iport(iport); 3026 3027 mutex_enter(&pwp->config_lock); 3028 if (pwp->config_changed) { 3029 mutex_exit(&pwp->config_lock); 3030 pnext = NULL; 3031 goto next_phy; 3032 } 3033 mutex_exit(&pwp->config_lock); 3034 3035 next_phy: 3036 
pmcs_unlock_phy(pptr); 3037 pptr = pnext; 3038 } 3039 3040 if (rval != 0) { 3041 return (rval); 3042 } 3043 3044 /* 3045 * Now walk through each PHY again, recalling ourselves if they 3046 * have children 3047 */ 3048 pptr = orig_pptr; 3049 while (pptr) { 3050 pmcs_lock_phy(pptr); 3051 pnext = pptr->sibling; 3052 pchild = pptr->children; 3053 pmcs_unlock_phy(pptr); 3054 3055 if (pchild) { 3056 rval = pmcs_configure_new_devices(pwp, pchild); 3057 if (rval != 0) { 3058 break; 3059 } 3060 } 3061 3062 pptr = pnext; 3063 } 3064 3065 return (rval); 3066 } 3067 3068 /* 3069 * Set all phys and descendent phys as changed if changed == B_TRUE, otherwise 3070 * mark them all as not changed. 3071 * 3072 * Called with parent PHY locked. 3073 */ 3074 void 3075 pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed, 3076 int level) 3077 { 3078 pmcs_phy_t *pptr; 3079 3080 if (level == 0) { 3081 if (changed) { 3082 PHY_CHANGED(pwp, parent); 3083 } else { 3084 parent->changed = 0; 3085 } 3086 if (parent->dtype == EXPANDER && parent->level) { 3087 parent->width = 1; 3088 } 3089 if (parent->children) { 3090 pmcs_set_changed(pwp, parent->children, changed, 3091 level + 1); 3092 } 3093 } else { 3094 pptr = parent; 3095 while (pptr) { 3096 if (changed) { 3097 PHY_CHANGED(pwp, pptr); 3098 } else { 3099 pptr->changed = 0; 3100 } 3101 if (pptr->dtype == EXPANDER && pptr->level) { 3102 pptr->width = 1; 3103 } 3104 if (pptr->children) { 3105 pmcs_set_changed(pwp, pptr->children, changed, 3106 level + 1); 3107 } 3108 pptr = pptr->sibling; 3109 } 3110 } 3111 } 3112 3113 /* 3114 * Take the passed phy mark it and its descendants as dead. 3115 * Fire up reconfiguration to abort commands and bury it. 3116 * 3117 * Called with the parent PHY locked. 
3118 */ 3119 void 3120 pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level) 3121 { 3122 pmcs_phy_t *pptr = parent; 3123 3124 while (pptr) { 3125 pptr->link_rate = 0; 3126 pptr->abort_sent = 0; 3127 pptr->abort_pending = 1; 3128 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 3129 pptr->need_rl_ext = 0; 3130 3131 if (pptr->dead == 0) { 3132 PHY_CHANGED(pwp, pptr); 3133 RESTART_DISCOVERY(pwp); 3134 } 3135 3136 pptr->dead = 1; 3137 3138 if (pptr->children) { 3139 pmcs_kill_changed(pwp, pptr->children, level + 1); 3140 } 3141 3142 /* 3143 * Only kill siblings at level > 0 3144 */ 3145 if (level == 0) { 3146 return; 3147 } 3148 3149 pptr = pptr->sibling; 3150 } 3151 } 3152 3153 /* 3154 * Go through every PHY and clear any that are dead (unless they're expanders) 3155 */ 3156 static void 3157 pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 3158 { 3159 pmcs_phy_t *pnext, *phyp; 3160 3161 phyp = pptr; 3162 while (phyp) { 3163 if (IS_ROOT_PHY(phyp)) { 3164 pmcs_lock_phy(phyp); 3165 } 3166 3167 if ((phyp->dtype != EXPANDER) && phyp->dead) { 3168 pmcs_clear_phy(pwp, phyp); 3169 } 3170 3171 if (phyp->children) { 3172 pmcs_clear_phys(pwp, phyp->children); 3173 } 3174 3175 pnext = phyp->sibling; 3176 3177 if (IS_ROOT_PHY(phyp)) { 3178 pmcs_unlock_phy(phyp); 3179 } 3180 3181 phyp = pnext; 3182 } 3183 } 3184 3185 /* 3186 * Clear volatile parts of a phy. Called with PHY locked. 
 */
void
pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: %s",
	    __func__, pptr->path);
	ASSERT(mutex_owned(&pptr->phy_lock));
	/*
	 * The "keep" comments below enumerate fields that are deliberately
	 * preserved across a clear; everything else is reset to its
	 * "unconfigured" value.
	 */
	/* keep sibling */
	/* keep children */
	/* keep parent */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	/* keep hw_event_ack */
	pptr->ncphy = 0;
	/* keep phynum */
	pptr->width = 0;
	pptr->ds_recovery_retries = 0;
	pptr->ds_prev_good_recoveries = 0;
	pptr->last_good_recovery = 0;
	pptr->prev_recovery = 0;

	/* keep dtype */
	pptr->config_stop = 0;
	pptr->spinup_hold = 0;
	pptr->atdt = 0;
	/* keep portid */
	pptr->link_rate = 0;
	pptr->valid_device_id = 0;
	pptr->abort_sent = 0;
	pptr->abort_pending = 0;
	pptr->need_rl_ext = 0;
	pptr->subsidiary = 0;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->reenumerate = 0;
	/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
	/* XXX: What about directly attached disks? */
	if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
		pptr->dead = 1;
	pptr->changed = 0;
	/* keep SAS address */
	/* keep path */
	/* keep ref_count */
	/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
	if (!IS_ROOT_PHY(pptr)) {
		pptr->last_iport = pptr->iport;
		pptr->iport = NULL;
	}
	/* keep target */
}

/*
 * Allocate softstate for this target if there isn't already one. If there
 * is, just redo our internal configuration. If it is actually "new", we'll
 * soon get a tran_tgt_init for it.
 *
 * Called with PHY locked.
 */
static void
pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: phy 0x%p @ %s",
	    __func__, (void *)pptr, pptr->path);

	if (pmcs_configure_phy(pwp, pptr) == B_FALSE) {
		/*
		 * If the config failed, mark the PHY as changed.
		 */
		PHY_CHANGED(pwp, pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: pmcs_configure_phy failed for phy 0x%p", __func__,
		    (void *)pptr);
		return;
	}

	/* Mark PHY as no longer changed */
	pptr->changed = 0;

	/*
	 * If the PHY has no target pointer:
	 *
	 * If it's a root PHY, see if another PHY in the iport holds the
	 * target pointer (primary PHY changed). If so, move it over.
	 *
	 * If it's not a root PHY, see if there's a PHY on the dead_phys
	 * list that matches.
	 */
	if (pptr->target == NULL) {
		if (IS_ROOT_PHY(pptr)) {
			pmcs_phy_t *rphy = pwp->root_phys;

			while (rphy) {
				if (rphy == pptr) {
					rphy = rphy->sibling;
					continue;
				}

				mutex_enter(&rphy->phy_lock);
				if ((rphy->iport == pptr->iport) &&
				    (rphy->target != NULL)) {
					/*
					 * Transfer the target from rphy to
					 * pptr under the target's statlock;
					 * note the statlock is the same lock
					 * before and after the transfer.
					 */
					mutex_enter(&rphy->target->statlock);
					pptr->target = rphy->target;
					rphy->target = NULL;
					pptr->target->phy = pptr;
					/* The target is now on pptr */
					mutex_exit(&pptr->target->statlock);
					mutex_exit(&rphy->phy_lock);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    pptr, pptr->target,
					    "%s: Moved target from %s to %s",
					    __func__, rphy->path, pptr->path);
					break;
				}
				mutex_exit(&rphy->phy_lock);

				rphy = rphy->sibling;
			}
		} else {
			pmcs_reap_dead_phy(pptr);
		}
	}

	/*
	 * Only assign the device if there is a target for this PHY with a
	 * matching SAS address. If an iport is disconnected from one piece
	 * of storage and connected to another within the iport stabilization
	 * time, we can get the PHY/target mismatch situation.
	 *
	 * Otherwise, it'll get done in tran_tgt_init.
	 */
	if (pptr->target) {
		mutex_enter(&pptr->target->statlock);
		if (pmcs_phy_target_match(pptr) == B_FALSE) {
			mutex_exit(&pptr->target->statlock);
			if (!IS_ROOT_PHY(pptr)) {
				pmcs_dec_phy_ref_count(pptr);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Not assigning existing tgt %p for PHY %p "
			    "(WWN mismatch)", __func__, (void *)pptr->target,
			    (void *)pptr);
			pptr->target = NULL;
			return;
		}

		if (!pmcs_assign_device(pwp, pptr->target)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: pmcs_assign_device failed for target 0x%p",
			    __func__, (void *)pptr->target);
		}
		mutex_exit(&pptr->target->statlock);
	}
}

/*
 * Called with PHY lock held.
 */
static boolean_t
pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	char *dtype;

	ASSERT(mutex_owned(&pptr->phy_lock));

	/*
	 * Mark this device as no longer changed.
	 */
	pptr->changed = 0;

	/*
	 * If we don't have a device handle, get one.
	 */
	if (pmcs_get_device_handle(pwp, pptr)) {
		return (B_FALSE);
	}

	pptr->configured = 1;

	/* dtype string is for the debug message only */
	switch (pptr->dtype) {
	case SAS:
		dtype = "SAS";
		break;
	case SATA:
		dtype = "SATA";
		break;
	case EXPANDER:
		dtype = "SMP";
		break;
	default:
		dtype = "???";
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "config_dev: %s "
	    "dev %s " SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	return (B_TRUE);
}

/*
 * Called with PHY locked
 */
static void
pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport)
{
	pmcs_phy_t *ctmp, *clist = NULL, *cnext;
	int result, i, nphy = 0;
	boolean_t root_phy = B_FALSE;

	ASSERT(iport);

	/*
	 * Step 1- clear our "changed" bit. If we need to retry/restart due
	 * to resource shortages, we'll set it again. While we're doing
	 * configuration, other events may set it again as well. If the PHY
	 * is a root PHY and is currently marked as having changed, reset the
	 * config_stop timer as well.
	 */
	if (IS_ROOT_PHY(pptr) && pptr->changed) {
		pptr->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
	}
	pptr->changed = 0;

	/*
	 * Step 2- make sure we don't overflow
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		pmcs_prt(pwp, PMCS_PRT_WARN, pptr, NULL,
		    "%s: SAS expansion tree too deep", __func__);
		return;
	}

	/*
	 * Step 3- Check if this expander is part of a wide phy that has
	 * already been configured.
	 *
	 * This is known by checking this level for another EXPANDER device
	 * with the same SAS address and isn't already marked as a subsidiary
	 * phy and a parent whose SAS address is the same as our SAS address
	 * (if there are parents).
	 */
	if (!IS_ROOT_PHY(pptr)) {
		/*
		 * No need to lock the parent here because we're in discovery
		 * and the only time a PHY's children pointer can change is
		 * in discovery; either in pmcs_clear_expander (which has
		 * already been called) or here, down below. Plus, trying to
		 * grab the parent's lock here can cause deadlock.
		 */
		ctmp = pptr->parent->children;
	} else {
		ctmp = pwp->root_phys;
		root_phy = B_TRUE;
	}

	while (ctmp) {
		/*
		 * If we've checked all PHYs up to pptr, we stop. Otherwise,
		 * we'll be checking for a primary PHY with a higher PHY
		 * number than pptr, which will never happen. The primary
		 * PHY on non-root expanders will ALWAYS be the lowest
		 * numbered PHY.
		 */
		if (ctmp == pptr) {
			break;
		}

		/*
		 * If pptr and ctmp are root PHYs, just grab the mutex on
		 * ctmp. No need to lock the entire tree. If they are not
		 * root PHYs, there is no need to lock since a non-root PHY's
		 * SAS address and other characteristics can only change in
		 * discovery anyway.
		 */
		if (root_phy) {
			mutex_enter(&ctmp->phy_lock);
		}

		if (ctmp->dtype == EXPANDER && ctmp->width &&
		    memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
			int widephy = 0;
			/*
			 * If these phys are not root PHYs, compare their SAS
			 * addresses too.
			 */
			if (!root_phy) {
				if (memcmp(ctmp->parent->sas_address,
				    pptr->parent->sas_address, 8) == 0) {
					widephy = 1;
				}
			} else {
				widephy = 1;
			}
			if (widephy) {
				/*
				 * pptr is another lane of the wide port whose
				 * primary PHY is ctmp; fold it in and stop.
				 */
				ctmp->width++;
				pptr->subsidiary = 1;

				/*
				 * Update the primary PHY's attached-port-pm
				 * and target-port-pm information with the info
				 * from this subsidiary
				 */
				pmcs_update_phy_pm_props(ctmp,
				    pptr->att_port_pm_tmp,
				    pptr->tgt_port_pm_tmp, B_TRUE);

				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: PHY %s part of wide PHY %s "
				    "(now %d wide)", __func__, pptr->path,
				    ctmp->path, ctmp->width);
				if (root_phy) {
					mutex_exit(&ctmp->phy_lock);
				}
				return;
			}
		}

		cnext = ctmp->sibling;
		if (root_phy) {
			mutex_exit(&ctmp->phy_lock);
		}
		ctmp = cnext;
	}

	/*
	 * Step 4- If we don't have a device handle, get one.  Since this
	 * is the primary PHY, make sure subsidiary is cleared.
	 */
	pptr->subsidiary = 0;
	pptr->iport = iport;
	if (pmcs_get_device_handle(pwp, pptr)) {
		goto out;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Config expander %s "
	    SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	/*
	 * Step 5- figure out how many phys are in this expander.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		/*
		 * 0 means a resource shortage or in-progress configuration:
		 * retry until config_stop expires; < 0 is a hard error.
		 */
		if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		goto out;
	}

	/*
	 * Step 6- Allocate a list of phys for this expander and figure out
	 * what each one is.  KM_SLEEP allocation cannot fail; the list is
	 * LIFO-linked through ->sibling.
	 */
	for (i = 0; i < nphy; i++) {
		ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
		bzero(ctmp, sizeof (pmcs_phy_t));
		ctmp->device_id = PMCS_INVALID_DEVICE_ID;
		ctmp->sibling = clist;
		ctmp->pend_dtype = NEW;	/* Init pending dtype */
		ctmp->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
		clist = ctmp;
	}

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		/*
		 * Clean up the newly allocated PHYs and return
		 */
		while (clist) {
			ctmp = clist->sibling;
			kmem_cache_free(pwp->phy_cache, clist);
			clist = ctmp;
		}
		return;
	}
	mutex_exit(&pwp->config_lock);

	/*
	 * Step 7- Now fill in the rest of the static portions of the phy.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		ctmp->parent = pptr;
		ctmp->pwp = pwp;
		ctmp->level = pptr->level+1;
		ctmp->portid = pptr->portid;
		/*
		 * NOTE(review): ctmp was bzero'd in Step 6 and
		 * tolerates_sas2 has not been set on it, so this test is
		 * always false and the SAS1 mask is always used here --
		 * confirm whether pptr->tolerates_sas2 was intended.
		 */
		if (ctmp->tolerates_sas2) {
			ASSERT(i < SAS2_PHYNUM_MAX);
			ctmp->phynum = i & SAS2_PHYNUM_MASK;
		} else {
			ASSERT(i < SAS_PHYNUM_MAX);
			ctmp->phynum = i & SAS_PHYNUM_MASK;
		}
		pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
		pmcs_lock_phy(ctmp);
	}

	/*
	 * Step 8- Discover things about each phy in the expander.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		result = pmcs_expander_content_discover(pwp, pptr, ctmp);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}
			goto out;
		}

		/* Set pend_dtype to dtype for 1st time initialization */
		ctmp->pend_dtype = ctmp->dtype;
	}

	/*
	 * Step 9: Install the new list on the next level. There should
	 * typically be no children pointer on this PHY. There is one known
	 * case where this can happen, though. If a root PHY goes down and
	 * comes back up before discovery can run, we will fail to remove the
	 * children from that PHY since it will no longer be marked dead.
	 * However, in this case, all children should also be marked dead. If
	 * we see that, take those children and put them on the dead_phys list.
	 */
	if (pptr->children != NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Expander @ %s still has children: Clean up",
		    __func__, pptr->path);
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	/*
	 * Set the new children pointer for this expander
	 */
	pptr->children = clist;
	clist = NULL;
	pptr->ncphy = nphy;
	pptr->configured = 1;

	/*
	 * We only set width if we're greater than level 0.
	 */
	if (pptr->level) {
		pptr->width = 1;
	}

	/*
	 * Now tell the rest of the world about us, as an SMP node.
	 */
	pptr->iport = iport;
	pmcs_new_tport(pwp, pptr);

out:
	/*
	 * Error paths arrive here with clist still populated (locked PHYs);
	 * on success clist was NULL'd above and this loop does nothing.
	 */
	while (clist) {
		ctmp = clist->sibling;
		pmcs_unlock_phy(clist);
		kmem_cache_free(pwp->phy_cache, clist);
		clist = ctmp;
	}
}

/*
 * 2. Check expanders marked changed (but not dead) to see if they still have
 * the same number of phys and the same SAS address. Mark them, their subsidiary
 * phys (if wide) and their descendents dead if anything has changed. Check
 * the devices they contain to see if *they* have changed. If they've changed
 * from type NOTHING we leave them marked changed to be configured later
 * (picking up a new SAS address and link rate if possible). Otherwise, any
 * change in type, SAS address or removal of target role will cause us to
 * mark them (and their descendents) as dead and cause any pending commands
 * and associated devices to be removed.
 *
 * Called with PHY (pptr) locked.
 */

static void
pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int nphy, result;
	pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
	boolean_t kill_changed, changed;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: check %s", __func__, pptr->path);

	/*
	 * Step 1: Mark phy as not changed. We will mark it changed if we need
	 * to retry.
	 */
	pptr->changed = 0;

	/*
	 * Reset the config_stop time. Although we're not actually configuring
	 * anything here, we do want some indication of when to give up trying
	 * if we can't communicate with the expander.
	 */
	pptr->config_stop = ddi_get_lbolt() +
	    drv_usectohz(PMCS_MAX_CONFIG_TIME);

	/*
	 * Step 2: Figure out how many phys are in this expander. If
	 * pmcs_expander_get_nphy returns 0 we ran out of resources,
	 * so reschedule and try later. If it returns another error,
	 * just return.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pptr->config_stop = 0;
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return;
	}

	/*
	 * Step 3: If the number of phys don't agree, kill the old sub-tree.
	 */
	if (nphy != pptr->ncphy) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: number of contained phys for %s changed from %d to %d",
		    __func__, pptr->path, pptr->ncphy, nphy);
		/*
		 * Force a rescan of this expander after dead contents
		 * are cleared and removed.
		 */
		pmcs_kill_changed(pwp, pptr, 0);
		return;
	}

	/*
	 * Step 4: if we're at the bottom of the stack, we're done
	 * (we can't have any levels below us)
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		return;
	}

	/*
	 * Step 5: Discover things about each phy in this expander.  We do
	 * this by walking the current list of contained phys and doing a
	 * content discovery for it to a local phy.
	 */
	ctmp = pptr->children;
	ASSERT(ctmp);
	/* Defensive runtime check; the ASSERT above is elided in non-DEBUG. */
	if (ctmp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: No children attached to expander @ %s?", __func__,
		    pptr->path);
		return;
	}

	while (ctmp) {
		/*
		 * Allocate a local PHY to contain the proposed new contents
		 * and link it to the rest of the local PHYs so that they
		 * can all be freed later.
		 */
		local = pmcs_clone_phy(ctmp);

		if (local_list == NULL) {
			local_list = local;
			local_tail = local;
		} else {
			local_tail->sibling = local;
			local_tail = local;
		}

		/*
		 * Need to lock the local PHY since pmcs_expander_content_
		 * discovery may call pmcs_clear_phy on it, which expects
		 * the PHY to be locked.
		 */
		pmcs_lock_phy(local);
		result = pmcs_expander_content_discover(pwp, pptr, local);
		pmcs_unlock_phy(local);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}

			/*
			 * Release all the local PHYs that we allocated.
			 */
			pmcs_free_phys(pwp, local_list);
			return;
		}

		ctmp = ctmp->sibling;
	}

	/*
	 * Step 6: Compare the local PHY's contents to our current PHY.  If
	 * there are changes, take the appropriate action.
	 * This is done in two steps (step 5 above, and 6 here) so that if we
	 * have to bail during this process (e.g. pmcs_expander_content_discover
	 * fails), we haven't actually changed the state of any of the real
	 * PHYs.  Next time we come through here, we'll be starting over from
	 * scratch.  This keeps us from marking a changed PHY as no longer
	 * changed, but then having to bail only to come back next time and
	 * think that the PHY hadn't changed.  If this were to happen, we
	 * would fail to properly configure the device behind this PHY.
	 */
	local = local_list;
	ctmp = pptr->children;

	while (ctmp) {
		changed = B_FALSE;
		kill_changed = B_FALSE;

		/*
		 * We set local to local_list prior to this loop so that we
		 * can simply walk the local_list while we walk this list. The
		 * two lists should be completely in sync.
		 *
		 * Clear the changed flag here.
		 */
		ctmp->changed = 0;

		if (ctmp->dtype != local->dtype) {
			if (ctmp->dtype != NOTHING) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from %s to %s "
				    "(killing)", __func__, ctmp->path,
				    PHY_TYPE(ctmp), PHY_TYPE(local));
				/*
				 * Force a rescan of this expander after dead
				 * contents are cleared and removed.
				 */
				changed = B_TRUE;
				kill_changed = B_TRUE;
			} else {
				changed = B_TRUE;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from NOTHING to %s",
				    __func__, ctmp->path, PHY_TYPE(local));
				/*
				 * Since this PHY was nothing and is now
				 * something, reset the config_stop timer.
				 */
				ctmp->config_stop = ddi_get_lbolt() +
				    drv_usectohz(PMCS_MAX_CONFIG_TIME);
			}

		} else if (ctmp->atdt != local->atdt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, "%s: "
			    "%s attached device type changed from %d to %d "
			    "(killing)", __func__, ctmp->path, ctmp->atdt,
			    local->atdt);
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;

			if (local->atdt == 0) {
				kill_changed = B_TRUE;
			}
		} else if (ctmp->link_rate != local->link_rate) {
			pmcs_prt(pwp, PMCS_PRT_INFO, ctmp, NULL, "%s: %s "
			    "changed speed from %s to %s", __func__, ctmp->path,
			    pmcs_get_rate(ctmp->link_rate),
			    pmcs_get_rate(local->link_rate));
			/* If the speed changed from invalid, force rescan */
			if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
				changed = B_TRUE;
				RESTART_DISCOVERY(pwp);
			} else {
				/* Just update to the new link rate */
				ctmp->link_rate = local->link_rate;
			}

			if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
				kill_changed = B_TRUE;
			}
		} else if (memcmp(ctmp->sas_address, local->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: SAS Addr for %s changed from " SAS_ADDR_FMT
			    "to " SAS_ADDR_FMT " (kill old tree)", __func__,
			    ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
			    SAS_ADDR_PRT(local->sas_address));
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: %s looks the same (type %s)",
			    __func__, ctmp->path, PHY_TYPE(ctmp));
			/*
			 * If EXPANDER, still mark it changed so we
			 * re-evaluate its contents.  If it's not an expander,
			 * but it hasn't been configured, also mark it as
			 * changed so that it will undergo configuration.
			 */
			if (ctmp->dtype == EXPANDER) {
				changed = B_TRUE;
			} else if ((ctmp->dtype != NOTHING) &&
			    !ctmp->configured) {
				ctmp->changed = 1;
			} else {
				/* It simply hasn't changed */
				ctmp->changed = 0;
			}
		}

		/*
		 * If the PHY changed, call pmcs_kill_changed if indicated,
		 * update its contents to reflect its current state and mark it
		 * as changed.
		 */
		if (changed) {
			/*
			 * pmcs_kill_changed will mark the PHY as changed, so
			 * only do PHY_CHANGED if we did not do kill_changed.
			 */
			if (kill_changed) {
				pmcs_kill_changed(pwp, ctmp, 0);
			} else {
				/*
				 * If we're not killing the device, it's not
				 * dead. Mark the PHY as changed.
				 */
				PHY_CHANGED(pwp, ctmp);

				if (ctmp->dead) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    ctmp, NULL, "%s: Unmarking PHY %s "
					    "dead, restarting discovery",
					    __func__, ctmp->path);
					ctmp->dead = 0;
					RESTART_DISCOVERY(pwp);
				}
			}

			/*
			 * If the dtype of this PHY is now NOTHING, mark it as
			 * unconfigured. Set pend_dtype to what the new dtype
			 * is. It'll get updated at the end of the discovery
			 * process.
			 */
			if (local->dtype == NOTHING) {
				bzero(ctmp->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = 0;
				ctmp->link_rate = 0;
				ctmp->pend_dtype = NOTHING;
				ctmp->configured = 0;
			} else {
				(void) memcpy(ctmp->sas_address,
				    local->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = local->atdt;
				ctmp->link_rate = local->link_rate;
				ctmp->pend_dtype = local->dtype;
			}
		}

		local = local->sibling;
		ctmp = ctmp->sibling;
	}

	/*
	 * If we got to here, that means we were able to see all the PHYs
	 * and we can now update all of the real PHYs with the information
	 * we got on the local PHYs.  Once that's done, free all the local
	 * PHYs.
	 */

	pmcs_free_phys(pwp, local_list);
}

/*
 * Top level routine to check expanders.  We call pmcs_check_expander for
 * each expander.  Since we're not doing any configuration right now, it
 * doesn't matter if this is breadth-first.
 */
static void
pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *phyp, *pnext, *pchild;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: %s", __func__, pptr->path);

	/*
	 * Check each expander at this level
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);

		/*
		 * Only re-check expanders that are marked changed but are
		 * still alive, configured, and are the primary (not
		 * subsidiary) lane of their port.
		 */
		if ((phyp->dtype == EXPANDER) && phyp->changed &&
		    !phyp->dead && !phyp->subsidiary &&
		    phyp->configured) {
			pmcs_check_expander(pwp, phyp);
		}

		pnext = phyp->sibling;
		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	/*
	 * Now check the children.  Snapshot sibling/children pointers under
	 * the PHY lock, then drop it before recursing so we never hold a
	 * parent's lock while walking its subtree.
	 */
	phyp = pptr;
	while (phyp) {
		pmcs_lock_phy(phyp);
		pnext = phyp->sibling;
		pchild = phyp->children;
		pmcs_unlock_phy(phyp);

		if (pchild) {
			pmcs_check_expanders(pwp, pchild);
		}

		phyp = pnext;
	}
}

/*
 * Recursively clear out a dead expander: mark its children dead (depth
 * first), move them to the dead_phys list, and clear any subsidiary PHYs
 * that share this expander's SAS address.
 *
 * Called with softstate and PHY locked.  level is the recursion depth;
 * 0 means pptr's own lock context, deeper levels are already locked by
 * the caller's traversal.
 */
static void
pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
{
	pmcs_phy_t *ctmp;

	ASSERT(mutex_owned(&pwp->lock));
	ASSERT(mutex_owned(&pptr->phy_lock));
	ASSERT(pptr->level < PMCS_MAX_XPND - 1);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: checking %s", __func__, pptr->path);

	ctmp = pptr->children;
	while (ctmp) {
		/*
		 * If the expander is dead, mark its children dead
		 */
		if (pptr->dead) {
			ctmp->dead = 1;
		}
		if (ctmp->dtype == EXPANDER) {
			pmcs_clear_expander(pwp, ctmp, level + 1);
		}
		ctmp = ctmp->sibling;
	}

	/*
	 * If this expander is not dead, we're done here.
	 */
	if (!pptr->dead) {
		return;
	}

	/*
	 * Now snip out the list of children below us and release them
	 */
	if (pptr->children) {
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	pptr->children = NULL;

	/*
	 * Clear subsidiary phys as well.  Getting the parent's PHY lock
	 * is only necessary if level == 0 since otherwise the parent is
	 * already locked.
	 */
	if (!IS_ROOT_PHY(pptr)) {
		if (level == 0) {
			mutex_enter(&pptr->parent->phy_lock);
		}
		ctmp = pptr->parent->children;
		if (level == 0) {
			mutex_exit(&pptr->parent->phy_lock);
		}
	} else {
		ctmp = pwp->root_phys;
	}

	while (ctmp) {
		if (ctmp == pptr) {
			ctmp = ctmp->sibling;
			continue;
		}
		/*
		 * We only need to lock subsidiary PHYs on the level 0
		 * expander.  Any children of that expander, subsidiaries or
		 * not, will already be locked.
		 */
		if (level == 0) {
			pmcs_lock_phy(ctmp);
		}
		/* Skip anything that isn't a subsidiary of this wide port. */
		if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
		    memcmp(ctmp->sas_address, pptr->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			if (level == 0) {
				pmcs_unlock_phy(ctmp);
			}
			ctmp = ctmp->sibling;
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: subsidiary %s", __func__, ctmp->path);
		pmcs_clear_phy(pwp, ctmp);
		if (level == 0) {
			pmcs_unlock_phy(ctmp);
		}
		ctmp = ctmp->sibling;
	}

	pmcs_clear_phy(pwp, pptr);
}

/*
 * Called with PHY locked and with scratch acquired. We return 0 if
 * we fail to allocate resources or notice that the configuration
 * count changed while we were running the command. We return
 * less than zero if we had an I/O error or received an unsupported
 * configuration. Otherwise we return the number of phys in the
 * expander.
 */
/*
 * DFM ("default first message"): set m only if not already set, so the
 * fall-through chain in the status switches below reports the first
 * (most specific) matching description.
 */
#define	DFM(m, y)	if (m == NULL) m = y
static int
pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	const uint_t rdoff = 0x100;	/* returned data offset */
	smp_response_frame_t *srf;
	smp_report_general_resp_t *srgr;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
	int result = 0;

	/*
	 * SMP REPORT GENERAL request header.  The low 0x1100 bits are the
	 * SAS-2 request/response length fields; they are stripped below
	 * (ival &= ~0xff00) to retry in SAS1.1 form if the expander
	 * rejects the SAS2 flavor.
	 */
	ival = 0x40001100;

again:
	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = pptr->dtype;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, NULL,
		    "%s: GET_IQ_ENTRY failed", __func__);
		pmcs_pwork(pwp, pwrk);
		goto out;
	}

	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
	 * The SMP payload is big-endian (BE_32); the IOMB header fields
	 * are little-endian (LE_32).
	 */
	msg[4] = BE_32(ival);
	msg[5] = 0;
	msg[6] = 0;
	msg[7] = 0;
	msg[8] = 0;
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	pmcs_pwork(pwp, pwrk);

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		result = 0;
		goto out;
	}
	mutex_exit(&pwp->config_lock);

	if (result) {
		/* Command timed out: abort it on the chip. */
		pmcs_timed_out(pwp, htag, __func__);
		/*
		 * NOTE(review): this message is logged again in the else
		 * branch below; the first instance appears redundant.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		result = 0;
		goto out;
	}
	ptr = (void *)pwp->scratch;
	status = LE_32(ptr[2]);
	if (status == PMCOUT_STATUS_UNDERFLOW ||
	    status == PMCOUT_STATUS_OVERFLOW) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
		    "%s: over/underflow", __func__);
		status = PMCOUT_STATUS_OK;
	}
	srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
	srgr = (smp_report_general_resp_t *)
	    &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];

	if (status != PMCOUT_STATUS_OK) {
		char *nag = NULL;
		(void) snprintf(buf, sizeof (buf),
		    "%s: SMP op failed (0x%x)", __func__, status);
		switch (status) {
		case PMCOUT_STATUS_IO_PORT_IN_RESET:
			DFM(nag, "I/O Port In Reset");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
			DFM(nag, "Hardware Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
			DFM(nag, "Internal SMP Resource Failure");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
			DFM(nag, "PHY Not Ready");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
			DFM(nag, "Connection Rate Not Supported");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
			DFM(nag, "Open Retry Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
			DFM(nag, "HW Resource Busy");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
			DFM(nag, "Response Connection Error");
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: expander %s SMP operation failed (%s)",
			    __func__, pptr->path, nag);
			break;

		/*
		 * For the IO_DS_NON_OPERATIONAL case, we need to kick off
		 * device state recovery and return 0 so that the caller
		 * doesn't assume this expander is dead for good.
		 */
		case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
			pmcs_xscsi_t *xp = pptr->target;

			pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, pptr, xp,
			    "%s: expander %s device state non-operational",
			    __func__, pptr->path);

			if (xp == NULL) {
				/*
				 * Kick off recovery right now.
				 */
				SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
				(void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
				    pwp, DDI_NOSLEEP);
			} else {
				mutex_enter(&xp->statlock);
				pmcs_start_dev_state_recovery(xp, pptr);
				mutex_exit(&xp->statlock);
			}

			break;
		}

		default:
			pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
			result = -EIO;
			break;
		}
	} else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response frame type 0x%x",
		    __func__, srf->srf_frame_type);
		result = -EINVAL;
	} else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response function 0x%x",
		    __func__, srf->srf_function);
		result = -EINVAL;
	} else if (srf->srf_result != 0) {
		/*
		 * Check to see if we have a value of 3 for failure and
		 * whether we were using a SAS2.0 allocation length value
		 * and retry without it.
		 */
		if (srf->srf_result == 3 && (ival & 0xff00)) {
			ival &= ~0xff00;
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: err 0x%x with SAS2 request- retry with SAS1",
			    __func__, srf->srf_result);
			goto again;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response 0x%x", __func__, srf->srf_result);
		result = -EINVAL;
	} else if (srgr->srgr_configuring) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: expander at phy %s is still configuring",
		    __func__, pptr->path);
		result = 0;
	} else {
		result = srgr->srgr_number_of_phys;
		/* SAS2 request accepted: remember for later SMP requests. */
		if (ival & 0xff00) {
			pptr->tolerates_sas2 = 1;
		}
		/*
		 * Save off the REPORT_GENERAL response
		 */
		bcopy(srgr, &pptr->rg_resp, sizeof (smp_report_general_resp_t));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s has %d phys and %s SAS2", pptr->path, result,
		    pptr->tolerates_sas2?
		    "tolerates" : "does not tolerate");
	}
out:
	return (result);
}

/*
 * Called with expander locked (and thus, pptr) as well as all PHYs up to
 * the root, and scratch acquired. Return 0 if we fail to allocate resources
 * or notice that the configuration changed while we were running the command.
 *
 * We return less than zero if we had an I/O error or received an
 * unsupported configuration.
 */
static int
pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
    pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	uint8_t sas_address[8];
	uint8_t att_sas_address[8];
	smp_response_frame_t *srf;
	smp_discover_resp_t *sdr;
	const uint_t rdoff = 0x100;	/* returned data offset */
	uint8_t *roff;
	uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
	int result = 0;
	uint8_t ini_support;
	uint8_t tgt_support;

	if (!expander->iport || !expander->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, expander, expander->target,
		    "%s: Can't reach PHY %s", __func__, expander->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
	if (pwrk == NULL) {
		goto out;
	}
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = expander->dtype;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(expander->device_id);
	msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
	    SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
	 */
	if (expander->tolerates_sas2) {
		msg[4] = BE_32(0x40101B00);
	} else {
		msg[4] = BE_32(0x40100000);
	}
	msg[5] = 0;
	msg[6] = BE_32((pptr->phynum << 16));
	msg[7] = 0;
	msg[8] = 0;
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
	msg[15] = 0;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		goto out;
	}

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	/* SMP serialization */
	pmcs_smp_acquire(expander->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/*
	 * Drop PHY lock while waiting so other completions aren't potentially
	 * blocked.
	 */
	pmcs_unlock_phy(expander);
	WAIT_FOR(pwrk, 1000, result);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(expander->iport);
	pmcs_lock_phy(expander);

	pmcs_pwork(pwp, pwrk);

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		result = 0;
		goto out;
	}
	mutex_exit(&pwp->config_lock);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		if (pmcs_abort(pwp, expander, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		result = -ETIMEDOUT;
		goto out;
	}
	ptr = (void *)pwp->scratch;
	/*
	 * Point roff to the DMA offset for returned data
4412 */ 4413 if (expander->tolerates_sas2) { 4414 msg[4] = BE_32(0x40101B00); 4415 } else { 4416 msg[4] = BE_32(0x40100000); 4417 } 4418 msg[5] = 0; 4419 msg[6] = BE_32((pptr->phynum << 16)); 4420 msg[7] = 0; 4421 msg[8] = 0; 4422 msg[9] = 0; 4423 msg[10] = 0; 4424 msg[11] = 0; 4425 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff)); 4426 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff)); 4427 msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff); 4428 msg[15] = 0; 4429 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4430 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4431 if (ptr == NULL) { 4432 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 4433 goto out; 4434 } 4435 4436 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE); 4437 4438 /* SMP serialization */ 4439 pmcs_smp_acquire(expander->iport); 4440 4441 pwrk->state = PMCS_WORK_STATE_ONCHIP; 4442 htag = pwrk->htag; 4443 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 4444 4445 /* 4446 * Drop PHY lock while waiting so other completions aren't potentially 4447 * blocked. 4448 */ 4449 pmcs_unlock_phy(expander); 4450 WAIT_FOR(pwrk, 1000, result); 4451 /* Release SMP lock before reacquiring PHY lock */ 4452 pmcs_smp_release(expander->iport); 4453 pmcs_lock_phy(expander); 4454 4455 pmcs_pwork(pwp, pwrk); 4456 4457 mutex_enter(&pwp->config_lock); 4458 if (pwp->config_changed) { 4459 RESTART_DISCOVERY_LOCKED(pwp); 4460 mutex_exit(&pwp->config_lock); 4461 result = 0; 4462 goto out; 4463 } 4464 mutex_exit(&pwp->config_lock); 4465 4466 if (result) { 4467 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__); 4468 if (pmcs_abort(pwp, expander, htag, 0, 0)) { 4469 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4470 "%s: Unable to issue SMP ABORT for htag 0x%08x", 4471 __func__, htag); 4472 } else { 4473 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4474 "%s: Issuing SMP ABORT for htag 0x%08x", 4475 __func__, htag); 4476 } 4477 result = -ETIMEDOUT; 4478 goto out; 4479 } 4480 ptr = (void *)pwp->scratch; 4481 /* 4482 * Point roff to the DMA offset for returned data 4483 
*/ 4484 roff = pwp->scratch; 4485 roff += rdoff; 4486 srf = (smp_response_frame_t *)roff; 4487 sdr = (smp_discover_resp_t *)(roff+4); 4488 status = LE_32(ptr[2]); 4489 if (status == PMCOUT_STATUS_UNDERFLOW || 4490 status == PMCOUT_STATUS_OVERFLOW) { 4491 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL, 4492 "%s: over/underflow", __func__); 4493 status = PMCOUT_STATUS_OK; 4494 } 4495 if (status != PMCOUT_STATUS_OK) { 4496 char *nag = NULL; 4497 (void) snprintf(buf, sizeof (buf), 4498 "%s: SMP op failed (0x%x)", __func__, status); 4499 switch (status) { 4500 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 4501 DFM(nag, "Hardware Timeout"); 4502 /* FALLTHROUGH */ 4503 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 4504 DFM(nag, "Internal SMP Resource Failure"); 4505 /* FALLTHROUGH */ 4506 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 4507 DFM(nag, "PHY Not Ready"); 4508 /* FALLTHROUGH */ 4509 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 4510 DFM(nag, "Connection Rate Not Supported"); 4511 /* FALLTHROUGH */ 4512 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 4513 DFM(nag, "Open Retry Timeout"); 4514 /* FALLTHROUGH */ 4515 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 4516 DFM(nag, "HW Resource Busy"); 4517 /* FALLTHROUGH */ 4518 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR: 4519 DFM(nag, "Response Connection Error"); 4520 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4521 "%s: expander %s SMP operation failed (%s)", 4522 __func__, pptr->path, nag); 4523 break; 4524 default: 4525 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr); 4526 result = -EIO; 4527 break; 4528 } 4529 goto out; 4530 } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) { 4531 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4532 "%s: bad response frame type 0x%x", 4533 __func__, srf->srf_frame_type); 4534 result = -EINVAL; 4535 goto out; 4536 } else if (srf->srf_function != SMP_FUNC_DISCOVER) { 4537 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 4538 "%s: bad response function 0x%x", 4539 
__func__, srf->srf_function); 4540 result = -EINVAL; 4541 goto out; 4542 } else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) { 4543 result = pmcs_smp_function_result(pwp, srf); 4544 /* Need not fail if PHY is Vacant */ 4545 if (result != SMP_RES_PHY_VACANT) { 4546 result = -EINVAL; 4547 goto out; 4548 } 4549 } 4550 4551 /* 4552 * Save off the DISCOVER response 4553 */ 4554 bcopy(sdr, &pptr->disc_resp, sizeof (smp_discover_resp_t)); 4555 4556 ini_support = (sdr->sdr_attached_sata_host | 4557 (sdr->sdr_attached_smp_initiator << 1) | 4558 (sdr->sdr_attached_stp_initiator << 2) | 4559 (sdr->sdr_attached_ssp_initiator << 3)); 4560 4561 tgt_support = (sdr->sdr_attached_sata_device | 4562 (sdr->sdr_attached_smp_target << 1) | 4563 (sdr->sdr_attached_stp_target << 2) | 4564 (sdr->sdr_attached_ssp_target << 3)); 4565 4566 pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address); 4567 pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address); 4568 4569 /* 4570 * Set the routing attribute regardless of the PHY type. 
4571 */ 4572 pptr->routing_attr = sdr->sdr_routing_attr; 4573 4574 switch (sdr->sdr_attached_device_type) { 4575 case SAS_IF_DTYPE_ENDPOINT: 4576 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4577 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4578 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4579 pptr->path, 4580 sdr->sdr_attached_device_type, 4581 sdr->sdr_negotiated_logical_link_rate, 4582 ini_support, 4583 tgt_support, 4584 SAS_ADDR_PRT(sas_address), 4585 SAS_ADDR_PRT(att_sas_address), 4586 sdr->sdr_attached_phy_identifier); 4587 4588 if (sdr->sdr_attached_sata_device || 4589 sdr->sdr_attached_stp_target) { 4590 pptr->dtype = SATA; 4591 } else if (sdr->sdr_attached_ssp_target) { 4592 pptr->dtype = SAS; 4593 } else if (tgt_support || ini_support) { 4594 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4595 "%s: %s has tgt support=%x init support=(%x)", 4596 __func__, pptr->path, tgt_support, ini_support); 4597 } 4598 4599 switch (pptr->routing_attr) { 4600 case SMP_ROUTING_SUBTRACTIVE: 4601 case SMP_ROUTING_TABLE: 4602 case SMP_ROUTING_DIRECT: 4603 pptr->routing_method = SMP_ROUTING_DIRECT; 4604 break; 4605 default: 4606 pptr->routing_method = 0xff; /* Invalid method */ 4607 break; 4608 } 4609 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4610 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4611 break; 4612 case SAS_IF_DTYPE_EDGE: 4613 case SAS_IF_DTYPE_FANOUT: 4614 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4615 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS=" 4616 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x", 4617 pptr->path, 4618 sdr->sdr_attached_device_type, 4619 sdr->sdr_negotiated_logical_link_rate, 4620 ini_support, 4621 tgt_support, 4622 SAS_ADDR_PRT(sas_address), 4623 SAS_ADDR_PRT(att_sas_address), 4624 sdr->sdr_attached_phy_identifier); 4625 if (sdr->sdr_attached_smp_target) { 4626 /* 4627 * Avoid configuring phys that just point back 4628 * at a parent phy 4629 */ 4630 if (expander->parent && 4631 
memcmp(expander->parent->sas_address, 4632 att_sas_address, 4633 sizeof (expander->parent->sas_address)) == 0) { 4634 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, NULL, 4635 "%s: skipping port back to parent " 4636 "expander (%s)", __func__, pptr->path); 4637 pptr->dtype = NOTHING; 4638 break; 4639 } 4640 pptr->dtype = EXPANDER; 4641 4642 } else if (tgt_support || ini_support) { 4643 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, 4644 "%s has tgt support=%x init support=(%x)", 4645 pptr->path, tgt_support, ini_support); 4646 pptr->dtype = EXPANDER; 4647 } 4648 if (pptr->routing_attr == SMP_ROUTING_DIRECT) { 4649 pptr->routing_method = 0xff; /* Invalid method */ 4650 } else { 4651 pptr->routing_method = pptr->routing_attr; 4652 } 4653 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum), 4654 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE); 4655 break; 4656 default: 4657 pptr->dtype = NOTHING; 4658 break; 4659 } 4660 if (pptr->dtype != NOTHING) { 4661 pmcs_phy_t *ctmp; 4662 4663 /* 4664 * If the attached device is a SATA device and the expander 4665 * is (possibly) a SAS2 compliant expander, check for whether 4666 * there is a NAA=5 WWN field starting at this offset and 4667 * use that for the SAS Address for this device. 4668 */ 4669 if (expander->tolerates_sas2 && pptr->dtype == SATA && 4670 (roff[SAS_ATTACHED_NAME_OFFSET] >> 8) == NAA_IEEE_REG) { 4671 (void) memcpy(pptr->sas_address, 4672 &roff[SAS_ATTACHED_NAME_OFFSET], 8); 4673 } else { 4674 (void) memcpy(pptr->sas_address, att_sas_address, 8); 4675 } 4676 pptr->atdt = (sdr->sdr_attached_device_type); 4677 /* 4678 * Now run up from the expander's parent up to the top to 4679 * make sure we only use the least common link_rate. 
*/
		/*
		 * Walk up the chain of ancestor expanders and derate the
		 * negotiated link rate to the slowest link on the path,
		 * since that is the effective rate to this device.
		 */
		for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) {
			if (ctmp->link_rate <
			    sdr->sdr_negotiated_logical_link_rate) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: derating link rate from %x to %x due "
				    "to %s being slower", pptr->path,
				    sdr->sdr_negotiated_logical_link_rate,
				    ctmp->link_rate,
				    ctmp->path);
				sdr->sdr_negotiated_logical_link_rate =
				    ctmp->link_rate;
			}
		}
		pptr->link_rate = sdr->sdr_negotiated_logical_link_rate;
		pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate;
		pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate;
		pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate;
		pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate;
		PHY_CHANGED(pwp, pptr);
	} else {
		/* Nothing attached here; reset the PHY's discovered state */
		pmcs_clear_phy(pwp, pptr);
	}
	result = 1;
out:
	return (result);
}

/*
 * Get a work structure and assign it a tag with type and serial number
 * If a structure is returned, it is returned locked.
 */
pmcwork_t *
pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp)
{
	pmcwork_t *p;
	uint16_t snum;
	uint32_t off;

	mutex_enter(&pwp->wfree_lock);
	p = STAILQ_FIRST(&pwp->wf);
	if (p == NULL) {
		/*
		 * If we couldn't get a work structure, it's time to bite
		 * the bullet, grab the pfree_lock and copy over all the
		 * work structures from the pending free list to the actual
		 * free list (assuming it's not also empty).
		 */
		mutex_enter(&pwp->pfree_lock);
		if (STAILQ_FIRST(&pwp->pf) == NULL) {
			mutex_exit(&pwp->pfree_lock);
			mutex_exit(&pwp->wfree_lock);
			return (NULL);
		}
		/* Steal the entire pending-free list in one shot */
		pwp->wf.stqh_first = pwp->pf.stqh_first;
		pwp->wf.stqh_last = pwp->pf.stqh_last;
		STAILQ_INIT(&pwp->pf);
		mutex_exit(&pwp->pfree_lock);

		p = STAILQ_FIRST(&pwp->wf);
		ASSERT(p != NULL);
	}
	STAILQ_REMOVE(&pwp->wf, p, pmcwork, next);
	snum = pwp->wserno++;
	mutex_exit(&pwp->wfree_lock);

	/* Index of this structure within the work array */
	off = p - pwp->work;

	mutex_enter(&p->lock);
	ASSERT(p->state == PMCS_WORK_STATE_NIL);
	ASSERT(p->htag == PMCS_TAG_FREE);
	/*
	 * The host tag encodes the tag type, a rolling serial number and
	 * the work structure's index, so a completion can be mapped back
	 * to this structure and stale completions can be detected.
	 */
	p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK;
	p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK);
	p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK);
	p->start = gethrtime();
	p->state = PMCS_WORK_STATE_READY;
	p->ssp_event = 0;
	p->dead = 0;

	if (phyp) {
		/* Work holds a reference on the PHY until pmcs_pwork() */
		p->phy = phyp;
		pmcs_inc_phy_ref_count(phyp);
	}

	return (p);
}

/*
 * Called with pwrk lock held. Returned with lock released.
4769 */ 4770 void 4771 pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p) 4772 { 4773 ASSERT(p != NULL); 4774 ASSERT(mutex_owned(&p->lock)); 4775 4776 p->last_ptr = p->ptr; 4777 p->last_arg = p->arg; 4778 p->last_phy = p->phy; 4779 p->last_xp = p->xp; 4780 p->last_htag = p->htag; 4781 p->last_state = p->state; 4782 p->finish = gethrtime(); 4783 4784 if (p->phy) { 4785 pmcs_dec_phy_ref_count(p->phy); 4786 } 4787 4788 p->state = PMCS_WORK_STATE_NIL; 4789 p->htag = PMCS_TAG_FREE; 4790 p->xp = NULL; 4791 p->ptr = NULL; 4792 p->arg = NULL; 4793 p->phy = NULL; 4794 p->abt_htag = 0; 4795 p->timer = 0; 4796 mutex_exit(&p->lock); 4797 4798 if (mutex_tryenter(&pwp->wfree_lock) == 0) { 4799 mutex_enter(&pwp->pfree_lock); 4800 STAILQ_INSERT_TAIL(&pwp->pf, p, next); 4801 mutex_exit(&pwp->pfree_lock); 4802 } else { 4803 STAILQ_INSERT_TAIL(&pwp->wf, p, next); 4804 mutex_exit(&pwp->wfree_lock); 4805 } 4806 } 4807 4808 /* 4809 * Find a work structure based upon a tag and make sure that the tag 4810 * serial number matches the work structure we've found. 4811 * If a structure is found, its lock is held upon return. 4812 * If lock_phy is B_TRUE, then lock the phy also when returning the work struct 4813 */ 4814 pmcwork_t * 4815 pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag, boolean_t lock_phy) 4816 { 4817 pmcwork_t *p; 4818 uint32_t idx = PMCS_TAG_INDEX(htag); 4819 4820 p = &pwp->work[idx]; 4821 4822 mutex_enter(&p->lock); 4823 if (p->htag == htag) { 4824 if (lock_phy) { 4825 mutex_exit(&p->lock); 4826 mutex_enter(&p->phy->phy_lock); 4827 mutex_enter(&p->lock); 4828 } 4829 return (p); 4830 } 4831 mutex_exit(&p->lock); 4832 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 4833 "INDEX 0x%x HTAG 0x%x got p->htag 0x%x", idx, htag, p->htag); 4834 return (NULL); 4835 } 4836 4837 /* 4838 * Issue an abort for a command or for all commands. 4839 * 4840 * Since this can be called from interrupt context, 4841 * we don't wait for completion if wait is not set. 4842 * 4843 * Called with PHY lock held. 
*/
int
pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds,
    int wait)
{
	pmcwork_t *pwrk;
	pmcs_xscsi_t *tgt;
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	int result, abt_type;
	uint32_t abt_htag, status;

	/* Only one ABORT_ALL may be outstanding per PHY at a time */
	if (pptr->abort_all_start) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "%s: ABORT_ALL for "
		    "(%s) already in progress.", __func__, pptr->path);
		return (EBUSY);
	}

	/* Pick the abort opcode matching the device protocol */
	switch (pptr->dtype) {
	case SAS:
		abt_type = PMCIN_SSP_ABORT;
		break;
	case SATA:
		abt_type = PMCIN_SATA_ABORT;
		break;
	case EXPANDER:
		abt_type = PMCIN_SMP_ABORT;
		break;
	default:
		return (0);
	}

	pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE,
	    pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->dtype = pptr->dtype;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	if (wait) {
		/* Completion copies the response IOMB into msg */
		pwrk->arg = msg;
	}
	if (pptr->valid_device_id == 0) {
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Invalid DeviceID", __func__);
		return (ENODEV);
	}
	/* Build the abort IOMB: msg[3] = tag to abort, msg[4] = abort-all */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	if (all_cmds) {
		msg[3] = 0;
		msg[4] = LE_32(1);
		pwrk->ptr = NULL;
		pptr->abort_all_start = gethrtime();
	} else {
		msg[3] = LE_32(tag);
		msg[4] = 0;
		pwrk->abt_htag = tag;
	}
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 5);
	if (all_cmds) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting all commands for %s device %s. (htag=0x%x)",
		    __func__, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
		    __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	}
	pwrk->state = PMCS_WORK_STATE_ONCHIP;

	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (!wait) {
		/* Fire and forget; completion path will free the work */
		mutex_exit(&pwrk->lock);
		return (0);
	}

	abt_htag = pwrk->htag;
	/* Drop the PHY lock while waiting so completions aren't blocked */
	pmcs_unlock_phy(pwrk->phy);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_lock_phy(pwrk->phy);

	tgt = pwrk->xp;
	pmcs_pwork(pwp, pwrk);

	if (tgt != NULL) {
		mutex_enter(&tgt->aqlock);
		/* Wait for the aborted commands to drain off the active q */
		if (!STAILQ_EMPTY(&tgt->aq)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort complete (result=0x%x), but "
			    "aq not empty (tgt 0x%p), waiting",
			    __func__, result, (void *)tgt);
			cv_wait(&tgt->abort_cv, &tgt->aqlock);
		}
		mutex_exit(&tgt->aqlock);
	}

	if (all_cmds) {
		/* Allow the next ABORT_ALL on this PHY to proceed */
		pptr->abort_all_start = 0;
		cv_signal(&pptr->abort_all_cv);
	}

	if (result) {
		/* Timed out waiting for the abort to complete */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: Abort (htag 0x%08x) request timed out",
		    __func__, abt_htag);
		if (tgt != NULL) {
			mutex_enter(&tgt->statlock);
			if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
			    (tgt->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
				    "%s: Trying DS error recovery for tgt 0x%p",
				    __func__, (void *)tgt);
				(void) pmcs_send_err_recovery_cmd(pwp,
				    PMCS_DEVICE_STATE_IN_RECOVERY, pptr, tgt);
			}
			mutex_exit(&tgt->statlock);
		}
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/*
		 * The only non-success status are IO_NOT_VALID &
		 * IO_ABORT_IN_PROGRESS.
		 * In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's
		 * status is of concern and this duplicate cmd status can
		 * be ignored.
		 * If IO_NOT_VALID, that's not an error per-se.
		 * For abort of single I/O complete the command anyway.
		 * If, however, we were aborting all, that is a problem
		 * as IO_NOT_VALID really means that the IO or device is
		 * not there. So, discovery process will take of the cleanup.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: abort result 0x%x", __func__, LE_32(msg[2]));
		if (all_cmds) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			return (EINVAL);
		}

		return (0);
	}

	if (tgt != NULL) {
		mutex_enter(&tgt->statlock);
		if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
			    __func__, (void *)tgt);
			(void) pmcs_send_err_recovery_cmd(pwp,
			    PMCS_DEVICE_STATE_OPERATIONAL, pptr, tgt);
		}
		mutex_exit(&tgt->statlock);
	}

	return (0);
}

/*
 * Issue a task management function to an SSP device.
 *
 * Called with PHY lock held.
 * statlock CANNOT be held upon entry.
*/
int
pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag,
    uint64_t lun, uint32_t *response)
{
	int result, ds;
	uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
	/* rptr aliases local[]; the response IU is transformed into it */
	sas_ssp_rsp_iu_t *rptr = (void *)local;
	static const uint8_t ssp_rsp_evec[] = {
		0x58, 0x61, 0x56, 0x72, 0x00
	};
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	struct pmcwork *pwrk;
	pmcs_xscsi_t *xp;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}
	/*
	 * NB: We use the PMCS_OQ_GENERAL outbound queue
	 * NB: so as to not get entangled in normal I/O
	 * NB: processing.
	 */
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_SSP_INI_TM_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	/* Only ABORT TASK and QUERY TASK refer to a specific tag */
	if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) {
		msg[3] = LE_32(tag);
	} else {
		msg[3] = 0;
	}
	msg[4] = LE_32(tmf);
	msg[5] = BE_32((uint32_t)lun);
	msg[6] = BE_32((uint32_t)(lun >> 32));
	msg[7] = LE_32(PMCIN_MESSAGE_REPORT);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	/*
	 * NOTE(review): only 7 dwords (msg[0..6]) are copied here, so the
	 * msg[7] (PMCIN_MESSAGE_REPORT) setting above never reaches the
	 * inbound queue entry -- confirm the intended COPY_MESSAGE count.
	 */
	COPY_MESSAGE(ptr, msg, 7);
	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;
	xp = pptr->target;
	pwrk->xp = xp;

	if (xp != NULL) {
		mutex_enter(&xp->statlock);
		/* Don't bother sending a TMF to a non-operational device */
		if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) {
			mutex_exit(&xp->statlock);
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: Not "
			    "sending '%s' because DS is '%s'", __func__,
			    pmcs_tmf2str(tmf), pmcs_status_str
			    (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL));
			return (EIO);
		}
		mutex_exit(&xp->statlock);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
	    "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__,
	    pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	pmcs_unlock_phy(pptr);
	/*
	 * This is a command sent to the target device, so it can take
	 * significant amount of time to complete when path & device is busy.
	 * Set a timeout to 20 seconds
	 */
	WAIT_FOR(pwrk, 20000, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		/* Timed out; kick off device state recovery if we can */
		if (xp == NULL) {
			return (ETIMEDOUT);
		}

		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: status %s for TMF %s action to %s, lun %llu",
		    __func__, pmcs_status_str(status), pmcs_tmf2str(tmf),
		    pptr->path, (unsigned long long) lun);
		/* Map the firmware status to a target device state */
		if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
			ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
		} else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) {
			/*
			 * If the status is IN_RECOVERY, it's an indication
			 * that it's now time for us to request to have the
			 * device state set to OPERATIONAL since we're the ones
			 * that requested recovery to begin with.
			 */
			ds = PMCS_DEVICE_STATE_OPERATIONAL;
		} else {
			ds = PMCS_DEVICE_STATE_IN_RECOVERY;
		}
		if (xp != NULL) {
			mutex_enter(&xp->statlock);
			if (xp->dev_state != ds) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
				    "%s: Sending err recovery cmd"
				    " for tgt 0x%p (status = %s)",
				    __func__, (void *)xp,
				    pmcs_status_str(status));
				(void) pmcs_send_err_recovery_cmd(pwp, ds,
				    pptr, xp);
			}
			mutex_exit(&xp->statlock);
		}
		return (EIO);
	} else {
		ds = PMCS_DEVICE_STATE_OPERATIONAL;
		if (xp != NULL) {
			mutex_enter(&xp->statlock);
			if (xp->dev_state != ds) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
				    "%s: Sending err recovery cmd"
				    " for tgt 0x%p (status = %s)",
				    __func__, (void *)xp,
				    pmcs_status_str(status));
				(void) pmcs_send_err_recovery_cmd(pwp, ds,
				    pptr, xp);
			}
			mutex_exit(&xp->statlock);
		}
	}
	/* msg[3] is the response IU length; zero means no response payload */
	if (LE_32(msg[3]) == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "TMF completed with no response");
		return (EIO);
	}
	/* Endian-transform the SSP response IU (at msg[5]) into local[] */
	pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec);
	xd = (uint8_t *)(&msg[5]);
	xd += SAS_RSP_HDR_SIZE;
	if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF response not RESPONSE DATA (0x%x)",
		    __func__, rptr->datapres);
		return (EIO);
	}
	if (rptr->response_data_length != 4) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Bad SAS RESPONSE DATA LENGTH", msg);
		return (EIO);
	}
	(void) memcpy(&status, xd, sizeof (uint32_t));
	status = BE_32(status);
	if (response != NULL)
		*response = status;
	/*
	 * The status is actually in the low-order byte. The upper three
	 * bytes contain additional information for the TMFs that support them.
	 * However, at this time we do not issue any of those. In the other
	 * cases, the upper three bytes are supposed to be 0, but it appears
	 * they aren't always. Just mask them off.
	 */
	switch (status & 0xff) {
	case SAS_RSP_TMF_COMPLETE:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF complete", __func__);
		result = 0;
		break;
	case SAS_RSP_TMF_SUCCEEDED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF succeeded", __func__);
		result = 0;
		break;
	case SAS_RSP_INVALID_FRAME:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned INVALID FRAME", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_NOT_SUPPORTED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned TMF NOT SUPPORTED", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_FAILED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned TMF FAILED", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_INCORRECT_LUN:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned INCORRECT LUN", __func__);
		result = EIO;
		break;
	case SAS_RSP_OVERLAPPED_OIPTTA:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG "
		    "ATTEMPTED", __func__);
		result = EIO;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned unknown code 0x%x", __func__, status);
		result = EIO;
		break;
	}
	return (result);
}

/*
 * Called with PHY lock held and scratch acquired
 */
int
pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	const char *utag_fail_fmt = "%s: untagged NCQ command failure";
	const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)";
	uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status;
	uint8_t *fp = pwp->scratch, ds;
	fis_t fis;
	pmcwork_t *pwrk;
	pmcs_xscsi_t *tgt;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		return (ENOMEM);
	}
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	/*
	 * Build a READ LOG EXT (page 0x10) PIO command to fetch the NCQ
	 * error log; the 512-byte result DMAs into the scratch area.
	 * NOTE(review): msg[7] is never initialized before COPY_MESSAGE
	 * below copies the whole entry -- confirm whether it should be 0.
	 */
	msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE,
	    PMCIN_SATA_HOST_IO_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32(512);
	msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI);
	msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV);
	msg[6] = LE_32(0x10);
	msg[8] = LE_32(1);
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma));
	msg[14] = LE_32(512);
	msg[15] = 0;

	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		return (ENOMEM);
	}
	COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting so completions aren't blocked */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	tgt = pptr->target;
	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, pmcs_timeo, __func__);
		return (EIO);
	}
	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) {
		if (tgt == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: cannot find target for phy 0x%p for "
			    "dev state recovery", __func__, (void *)pptr);
			return (EIO);
		}

		mutex_enter(&tgt->statlock);

		pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg);
		/* Map the firmware status to a target device state */
		if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
			ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
		} else {
			ds = PMCS_DEVICE_STATE_IN_RECOVERY;
		}
		if (tgt->dev_state != ds) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, "%s: Trying "
			    "SATA DS Recovery for tgt(0x%p) for status(%s)",
			    __func__, (void *)tgt, pmcs_status_str(status));
			(void) pmcs_send_err_recovery_cmd(pwp, ds, pptr, tgt);
		}

		mutex_exit(&tgt->statlock);
		return (EIO);
	}
	/* Reassemble the D2H register FIS from the returned log page bytes */
	fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H;
	fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5];
	fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9];
	fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13];
	fis[4] = 0;
	/* Bit 7 of byte 0 set means the failed command was untagged */
	if (fp[0] & 0x80) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    utag_fail_fmt, __func__);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    tag_fail_fmt, __func__, fp[0] & 0x1f);
	}
	pmcs_fis_dump(pwp, fis);
	pptr->need_rl_ext = 0;
	return (0);
}

/*
 * Transform a structure from CPU to Device endian format, or
 * vice versa, based upon a transformation vector.
 *
 * A transformation vector is an array of bytes, each byte
 * of which is defined thusly:
 *
 *	bit 7: from CPU to desired endian, otherwise from desired endian
 *	to CPU format
 *	bit 6: Big Endian, else Little Endian
 *	bits 5-4:
 *		00 Undefined
 *		01 One Byte quantities
 *		02 Two Byte quantities
 *		03 Four Byte quantities
 *
 *	bits 3-0:
 *		00 Undefined
 *		Number of quantities to transform
 *
 * The vector is terminated by a 0 value.
5377 */ 5378 5379 void 5380 pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in, 5381 const uint8_t *xfvec) 5382 { 5383 uint8_t c, *out = orig_out, *in = orig_in; 5384 5385 if (xfvec == NULL) { 5386 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5387 "%s: null xfvec", __func__); 5388 return; 5389 } 5390 if (out == NULL) { 5391 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5392 "%s: null out", __func__); 5393 return; 5394 } 5395 if (in == NULL) { 5396 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5397 "%s: null in", __func__); 5398 return; 5399 } 5400 while ((c = *xfvec++) != 0) { 5401 int nbyt = (c & 0xf); 5402 int size = (c >> 4) & 0x3; 5403 int bige = (c >> 4) & 0x4; 5404 5405 switch (size) { 5406 case 1: 5407 { 5408 while (nbyt-- > 0) { 5409 *out++ = *in++; 5410 } 5411 break; 5412 } 5413 case 2: 5414 { 5415 uint16_t tmp; 5416 while (nbyt-- > 0) { 5417 (void) memcpy(&tmp, in, sizeof (uint16_t)); 5418 if (bige) { 5419 tmp = BE_16(tmp); 5420 } else { 5421 tmp = LE_16(tmp); 5422 } 5423 (void) memcpy(out, &tmp, sizeof (uint16_t)); 5424 out += sizeof (uint16_t); 5425 in += sizeof (uint16_t); 5426 } 5427 break; 5428 } 5429 case 3: 5430 { 5431 uint32_t tmp; 5432 while (nbyt-- > 0) { 5433 (void) memcpy(&tmp, in, sizeof (uint32_t)); 5434 if (bige) { 5435 tmp = BE_32(tmp); 5436 } else { 5437 tmp = LE_32(tmp); 5438 } 5439 (void) memcpy(out, &tmp, sizeof (uint32_t)); 5440 out += sizeof (uint32_t); 5441 in += sizeof (uint32_t); 5442 } 5443 break; 5444 } 5445 default: 5446 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 5447 "%s: bad size", __func__); 5448 return; 5449 } 5450 } 5451 } 5452 5453 const char * 5454 pmcs_get_rate(unsigned int linkrt) 5455 { 5456 const char *rate; 5457 switch (linkrt) { 5458 case SAS_LINK_RATE_1_5GBIT: 5459 rate = "1.5"; 5460 break; 5461 case SAS_LINK_RATE_3GBIT: 5462 rate = "3.0"; 5463 break; 5464 case SAS_LINK_RATE_6GBIT: 5465 rate = "6.0"; 5466 break; 5467 default: 5468 rate = "???"; 5469 break; 5470 } 5471 return (rate); 5472 } 5473 5474 const 
char *
pmcs_get_typename(pmcs_dtype_t type)
{
	switch (type) {
	case NOTHING:
		return ("NIL");
	case SATA:
		return ("SATA");
	case SAS:
		return ("SSP");
	case EXPANDER:
		return ("EXPANDER");
	}
	/* Unknown/unhandled dtype */
	return ("????");
}

/* Return a printable name for a SAS task management function code. */
const char *
pmcs_tmf2str(int tmf)
{
	switch (tmf) {
	case SAS_ABORT_TASK:
		return ("Abort Task");
	case SAS_ABORT_TASK_SET:
		return ("Abort Task Set");
	case SAS_CLEAR_TASK_SET:
		return ("Clear Task Set");
	case SAS_LOGICAL_UNIT_RESET:
		return ("Logical Unit Reset");
	case SAS_I_T_NEXUS_RESET:
		return ("I_T Nexus Reset");
	case SAS_CLEAR_ACA:
		return ("Clear ACA");
	case SAS_QUERY_TASK:
		return ("Query Task");
	case SAS_QUERY_TASK_SET:
		return ("Query Task Set");
	case SAS_QUERY_UNIT_ATTENTION:
		return ("Query Unit Attention");
	default:
		return ("Unknown");
	}
}

/*
 * Return a printable name for a PMCOUT_STATUS_* completion code, or
 * NULL if the code is not recognized (callers must handle NULL).
 */
const char *
pmcs_status_str(uint32_t status)
{
	switch (status) {
	case PMCOUT_STATUS_OK:
		return ("OK");
	case PMCOUT_STATUS_ABORTED:
		return ("ABORTED");
	case PMCOUT_STATUS_OVERFLOW:
		return ("OVERFLOW");
	case PMCOUT_STATUS_UNDERFLOW:
		return ("UNDERFLOW");
	case PMCOUT_STATUS_FAILED:
		return ("FAILED");
	case PMCOUT_STATUS_ABORT_RESET:
		return ("ABORT_RESET");
	case PMCOUT_STATUS_IO_NOT_VALID:
		return ("IO_NOT_VALID");
	case PMCOUT_STATUS_NO_DEVICE:
		return ("NO_DEVICE");
	case PMCOUT_STATUS_ILLEGAL_PARAMETER:
		return ("ILLEGAL_PARAMETER");
	case PMCOUT_STATUS_LINK_FAILURE:
		return ("LINK_FAILURE");
	case PMCOUT_STATUS_PROG_ERROR:
		return ("PROG_ERROR");
	case PMCOUT_STATUS_EDC_IN_ERROR:
		return ("EDC_IN_ERROR");
	case PMCOUT_STATUS_EDC_OUT_ERROR:
		return ("EDC_OUT_ERROR");
	case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
		return ("ERROR_HW_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERR_BREAK:
		return ("XFER_ERR_BREAK");
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		return ("XFER_ERR_PHY_NOT_READY");
	/* Open-connection rejection statuses */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
		return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
		return ("OPEN_CNX_ERROR_ZONE_VIOLATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
		return ("OPEN_CNX_ERROR_BREAK");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS");
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
		return ("OPENCNX_ERROR_BAD_DESTINATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		return ("OPEN_CNX_ERROR_WRONG_DESTINATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		return ("OPEN_CNX_ERROR_UNKNOWN_ERROR");
	/* Transfer-layer error statuses */
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		return ("IO_XFER_ERROR_NAK_RECEIVED");
	case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT:
		return ("XFER_ERROR_ACK_NAK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
		return ("XFER_ERROR_PEER_ABORTED");
	case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
		return ("XFER_ERROR_RX_FRAME");
	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
		return ("IO_XFER_ERROR_DMA");
	case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT:
		return ("XFER_ERROR_CREDIT_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		return ("XFER_ERROR_SATA_LINK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_SATA:
		return ("XFER_ERROR_SATA");
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		return ("XFER_ERROR_REJECTED_NCQ_MODE");
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		return ("XFER_ERROR_ABORTED_DUE_TO_SRST");
	case
PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		return ("XFER_ERROR_ABORTED_NCQ_MODE");
	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		return ("IO_XFER_OPEN_RETRY_TIMEOUT");
	case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
		return ("SMP_RESP_CONNECTION_ERROR");
	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
		return ("XFER_ERROR_UNEXPECTED_PHASE");
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
		return ("XFER_ERROR_RDY_OVERRUN");
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
		return ("XFER_ERROR_RDY_NOT_EXPECTED");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
		return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
		return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK");
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
		return ("XFER_ERROR_OFFSET_MISMATCH");
	case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN:
		return ("XFER_ERROR_ZERO_DATA_LEN");
	case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED:
		return ("XFER_CMD_FRAME_ISSUED");
	case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
		return ("ERROR_INTERNAL_SMP_RESOURCE");
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
		return ("IO_PORT_IN_RESET");
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
		return ("DEVICE STATE NON-OPERATIONAL");
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		return ("DEVICE STATE IN RECOVERY");
	case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		return ("OPEN CNX ERR HW RESOURCE BUSY");
	default:
		/* Unknown code: callers must be prepared for NULL */
		return (NULL);
	}
}

/*
 * Convert an 8-byte SAS address (wire/big-endian order) to a uint64_t
 * WWN, ba[0] being the most significant byte.
 */
uint64_t
pmcs_barray2wwn(uint8_t ba[8])
{
	uint64_t result = 0;
	int i;

	for (i = 0; i < 8; i++) {
		result <<= 8;
		result |= ba[i];
	}
	return (result);
}

/*
 * Convert a uint64_t WWN back into its 8-byte (wire/big-endian order)
 * array form; the inverse of pmcs_barray2wwn.
 */
void
pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8])
5645 { 5646 int i; 5647 for (i = 0; i < 8; i++) { 5648 ba[7 - i] = wwn & 0xff; 5649 wwn >>= 8; 5650 } 5651 } 5652 5653 void 5654 pmcs_report_fwversion(pmcs_hw_t *pwp) 5655 { 5656 const char *fwsupport; 5657 switch (PMCS_FW_TYPE(pwp)) { 5658 case PMCS_FW_TYPE_RELEASED: 5659 fwsupport = "Released"; 5660 break; 5661 case PMCS_FW_TYPE_DEVELOPMENT: 5662 fwsupport = "Development"; 5663 break; 5664 case PMCS_FW_TYPE_ALPHA: 5665 fwsupport = "Alpha"; 5666 break; 5667 case PMCS_FW_TYPE_BETA: 5668 fwsupport = "Beta"; 5669 break; 5670 default: 5671 fwsupport = "Special"; 5672 break; 5673 } 5674 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, 5675 "Chip Revision: %c; F/W Revision %x.%x.%x %s (ILA rev %08x)", 5676 'A' + pwp->chiprev, PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), 5677 PMCS_FW_MICRO(pwp), fwsupport, pwp->ila_ver); 5678 } 5679 5680 void 5681 pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen) 5682 { 5683 if (pptr->parent) { 5684 pmcs_phy_name(pwp, pptr->parent, obuf, olen); 5685 (void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum); 5686 } else { 5687 (void) snprintf(obuf, olen, "pp%02x", pptr->phynum); 5688 } 5689 } 5690 5691 /* 5692 * This function is called as a sanity check to ensure that a newly registered 5693 * PHY doesn't have a device_id that exists with another registered PHY. 5694 */ 5695 static boolean_t 5696 pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id) 5697 { 5698 pmcs_phy_t *pptr, *pchild; 5699 boolean_t rval; 5700 5701 pptr = parent; 5702 5703 while (pptr) { 5704 if (pptr->valid_device_id && (pptr != phyp) && 5705 (pptr->device_id == device_id)) { 5706 /* 5707 * This can still be OK if both of these PHYs actually 5708 * represent the same device (e.g. expander). It could 5709 * be a case of a new "primary" PHY. If the SAS address 5710 * is the same and they have the same parent, we'll 5711 * accept this if the PHY to be registered is the 5712 * primary. 
			 */
			if ((phyp->parent == pptr->parent) &&
			    (memcmp(phyp->sas_address,
			    pptr->sas_address, 8) == 0) && (phyp->width > 1)) {
				/*
				 * Move children over to the new primary and
				 * update both PHYs
				 */
				pmcs_lock_phy(pptr);
				phyp->children = pptr->children;
				pchild = phyp->children;
				while (pchild) {
					pchild->parent = phyp;
					pchild = pchild->sibling;
				}
				phyp->subsidiary = 0;
				phyp->ncphy = pptr->ncphy;
				/*
				 * device_id, valid_device_id, and configured
				 * will be set by the caller
				 */
				pptr->children = NULL;
				pptr->subsidiary = 1;
				pptr->ncphy = 0;
				pmcs_unlock_phy(pptr);
				pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
				    "%s: Moving device_id %d from PHY %s to %s",
				    __func__, device_id, pptr->path,
				    phyp->path);
				return (B_TRUE);
			}
			/* Same device_id on a genuinely different PHY: fail */
			pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: phy %s already exists as %s with "
			    "device id 0x%x", __func__, phyp->path,
			    pptr->path, device_id);
			return (B_FALSE);
		}

		/* Recurse over this PHY's children as well */
		if (pptr->children) {
			rval = pmcs_validate_devid(pptr->children, phyp,
			    device_id);
			if (rval == B_FALSE) {
				return (rval);
			}
		}

		pptr = pptr->sibling;
	}

	/* This PHY and device_id are valid */
	return (B_TRUE);
}

/*
 * Depth-first search of the tree rooted at phyp for a PHY whose SAS
 * address matches wwn (8 bytes, wire order).
 * If the PHY is found, it is returned locked
 */
static pmcs_phy_t *
pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn)
{
	pmcs_phy_t *matched_phy, *cphyp, *nphyp;

	ASSERT(!mutex_owned(&phyp->phy_lock));

	while (phyp) {
		pmcs_lock_phy(phyp);

		if (phyp->valid_device_id) {
			if (memcmp(phyp->sas_address, wwn, 8) == 0) {
				/* Match: return with phy_lock still held */
				return (phyp);
			}
		}

		if (phyp->children) {
			/* Drop our lock before descending into children */
			cphyp = phyp->children;
			pmcs_unlock_phy(phyp);
			matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn);
			if (matched_phy) {
				ASSERT(mutex_owned(&matched_phy->phy_lock));
				return (matched_phy);
			}
			pmcs_lock_phy(phyp);
		}

		/*
		 * Only iterate through non-root PHYs
		 */
		if (IS_ROOT_PHY(phyp)) {
			pmcs_unlock_phy(phyp);
			phyp = NULL;
		} else {
			/* Grab the sibling pointer before dropping the lock */
			nphyp = phyp->sibling;
			pmcs_unlock_phy(phyp);
			phyp = nphyp;
		}
	}

	return (NULL);
}

/*
 * Search all PHY trees for a PHY whose SAS address matches wwn.
 * If found, the PHY is returned with its phy_lock held.
 */
pmcs_phy_t *
pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
	uint8_t ebstr[8];
	pmcs_phy_t *pptr, *matched_phy;

	/* Convert to wire order for memcmp against sas_address */
	pmcs_wwn2barray(wwn, ebstr);

	pptr = pwp->root_phys;
	while (pptr) {
		matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr);
		if (matched_phy) {
			ASSERT(mutex_owned(&matched_phy->phy_lock));
			return (matched_phy);
		}

		pptr = pptr->sibling;
	}

	return (NULL);
}


/*
 * pmcs_find_phy_by_sas_address
 *
 * Find a PHY that both matches "sas_addr" and is on "iport".
 * If a matching PHY is found, it is returned locked.
 */
pmcs_phy_t *
pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport,
    pmcs_phy_t *root, char *sas_addr)
{
	int ua_form = 1;
	uint64_t wwn;
	char addr[PMCS_MAX_UA_SIZE];
	pmcs_phy_t *pptr, *pnext, *pchild;

	/* NULL root means start from the top of all PHY trees */
	if (root == NULL) {
		pptr = pwp->root_phys;
	} else {
		pptr = root;
	}

	while (pptr) {
		pmcs_lock_phy(pptr);
		/*
		 * If the PHY is dead or does not have a valid device ID,
		 * skip it.
		 */
		if ((pptr->dead) || (!pptr->valid_device_id)) {
			goto next_phy;
		}

		if (pptr->iport != iport) {
			goto next_phy;
		}

		/* Compare unit-address strings rather than raw WWNs */
		wwn = pmcs_barray2wwn(pptr->sas_address);
		(void *) scsi_wwn_to_wwnstr(wwn, ua_form, addr);
		if (strncmp(addr, sas_addr, strlen(addr)) == 0) {
			/* Match: return with the PHY locked */
			return (pptr);
		}

		if (pptr->children) {
			/* Drop our lock before recursing into children */
			pchild = pptr->children;
			pmcs_unlock_phy(pptr);
			pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild,
			    sas_addr);
			if (pnext) {
				return (pnext);
			}
			pmcs_lock_phy(pptr);
		}

next_phy:
		pnext = pptr->sibling;
		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (NULL);
}

/*
 * Pretty-print a FIS (Frame Information Structure) for debugging.
 */
void
pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis)
{
	switch (fis[0] & 0xff) {
	case FIS_REG_H2DEV:
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "FIS REGISTER HOST TO DEVICE: "
		    "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x "
		    "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 |
		    BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]),
		    (unsigned long long)
		    (((uint64_t)fis[2] & 0x00ffffff) << 24 |
		    ((uint64_t)fis[1] & 0x00ffffff)));
		break;
	case FIS_REG_D2H:
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "FIS REGISTER DEVICE TO HOST: Status=0x%02x "
		    "Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu",
		    BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]),
		    (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 |
		    ((uint64_t)fis[1] & 0x00ffffff)));
		break;
	default:
		/* Unknown FIS type: dump the raw words */
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "FIS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
		    fis[0], fis[1], fis[2], fis[3], fis[4]);
		break;
	}
}

/*
 * Print a header message followed by a hex dump of a queue entry,
 * four 32-bit words per line, at the given debug level.
 */
void
pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg)
{
	uint32_t *mb = arg;
	size_t i;

	pmcs_prt(pwp, level, NULL, NULL, msg);
	for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i +=
4) { 5934 pmcs_prt(pwp, level, NULL, NULL, 5935 "Offset %2lu: 0x%08x 0x%08x 0x%08x 0x%08x", 5936 i * sizeof (uint32_t), LE_32(mb[i]), 5937 LE_32(mb[i+1]), LE_32(mb[i+2]), LE_32(mb[i+3])); 5938 } 5939 } 5940 5941 /* 5942 * If phyp == NULL we're being called from the worker thread, in which 5943 * case we need to check all the PHYs. In this case, the softstate lock 5944 * will be held. 5945 * If phyp is non-NULL, just issue the spinup release for the specified PHY 5946 * (which will already be locked). 5947 */ 5948 void 5949 pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 5950 { 5951 uint32_t *msg; 5952 struct pmcwork *pwrk; 5953 pmcs_phy_t *tphyp; 5954 5955 if (phyp != NULL) { 5956 ASSERT(mutex_owned(&phyp->phy_lock)); 5957 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5958 "%s: Issuing spinup release only for PHY %s", __func__, 5959 phyp->path); 5960 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5961 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5962 if (msg == NULL || (pwrk = 5963 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 5964 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 5965 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 5966 return; 5967 } 5968 5969 phyp->spinup_hold = 0; 5970 bzero(msg, PMCS_QENTRY_SIZE); 5971 pwrk->htag |= PMCS_TAG_NONIO_CMD; 5972 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 5973 PMCIN_LOCAL_PHY_CONTROL)); 5974 msg[1] = LE_32(pwrk->htag); 5975 msg[2] = LE_32((0x10 << 8) | phyp->phynum); 5976 5977 pwrk->dtype = phyp->dtype; 5978 pwrk->state = PMCS_WORK_STATE_ONCHIP; 5979 mutex_exit(&pwrk->lock); 5980 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 5981 return; 5982 } 5983 5984 ASSERT(mutex_owned(&pwp->lock)); 5985 5986 tphyp = pwp->root_phys; 5987 while (tphyp) { 5988 pmcs_lock_phy(tphyp); 5989 if (tphyp->spinup_hold == 0) { 5990 pmcs_unlock_phy(tphyp); 5991 tphyp = tphyp->sibling; 5992 continue; 5993 } 5994 5995 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 5996 "%s: Issuing spinup release for PHY %s", __func__, 5997 phyp->path); 5998 5999 
mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6000 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6001 if (msg == NULL || (pwrk = 6002 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) { 6003 pmcs_unlock_phy(tphyp); 6004 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 6005 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE); 6006 break; 6007 } 6008 6009 tphyp->spinup_hold = 0; 6010 bzero(msg, PMCS_QENTRY_SIZE); 6011 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, 6012 PMCIN_LOCAL_PHY_CONTROL)); 6013 msg[1] = LE_32(pwrk->htag); 6014 msg[2] = LE_32((0x10 << 8) | tphyp->phynum); 6015 6016 pwrk->dtype = phyp->dtype; 6017 pwrk->state = PMCS_WORK_STATE_ONCHIP; 6018 mutex_exit(&pwrk->lock); 6019 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 6020 pmcs_unlock_phy(tphyp); 6021 6022 tphyp = tphyp->sibling; 6023 } 6024 } 6025 6026 /* 6027 * Abort commands on dead PHYs and deregister them as well as removing 6028 * the associated targets. 6029 */ 6030 static int 6031 pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 6032 { 6033 pmcs_phy_t *pnext, *pchild; 6034 boolean_t remove_device; 6035 int rval = 0; 6036 6037 while (phyp) { 6038 pmcs_lock_phy(phyp); 6039 pchild = phyp->children; 6040 pnext = phyp->sibling; 6041 pmcs_unlock_phy(phyp); 6042 6043 if (pchild) { 6044 rval = pmcs_kill_devices(pwp, pchild); 6045 if (rval) { 6046 return (rval); 6047 } 6048 } 6049 6050 /* 6051 * pmcs_remove_device requires the softstate lock. 
		 */
		mutex_enter(&pwp->lock);
		pmcs_lock_phy(phyp);
		/* Only PHYs that are dead AND registered need removal */
		if (phyp->dead && phyp->valid_device_id) {
			remove_device = B_TRUE;
		} else {
			remove_device = B_FALSE;
		}

		if (remove_device) {
			pmcs_remove_device(pwp, phyp);
			mutex_exit(&pwp->lock);

			rval = pmcs_kill_device(pwp, phyp);

			if (rval) {
				pmcs_unlock_phy(phyp);
				return (rval);
			}
		} else {
			mutex_exit(&pwp->lock);
		}

		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	return (rval);
}

/*
 * Abort any outstanding commands on this PHY's device (if necessary)
 * and deregister its device handle from the chip.
 * Called with PHY locked
 */
int
pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int r, result;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	struct pmcwork *pwrk;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "kill %s device @ %s",
	    pmcs_get_typename(pptr->dtype), pptr->path);

	/*
	 * There may be an outstanding ABORT_ALL running, which we wouldn't
	 * know just by checking abort_pending. We can, however, check
	 * abort_all_start. If it's non-zero, there is one, and we'll just
	 * sit here and wait for it to complete. If we don't, we'll remove
	 * the device while there are still commands pending.
	 */
	if (pptr->abort_all_start) {
		/* Wait out any in-flight ABORT_ALL before proceeding */
		while (pptr->abort_all_start) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
			    __func__, (void *)pptr);
			cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
		}
	} else if (pptr->abort_pending) {
		r = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);

		if (r) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: ABORT_ALL returned non-zero status (%d) for "
			    "PHY 0x%p", __func__, r, (void *)pptr);
			return (r);
		}
		pptr->abort_pending = 0;
	}

	/* Nothing to deregister if there's no valid device handle */
	if (pptr->valid_device_id == 0) {
		return (0);
	}

	if ((pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}
	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;
	/* Build the DEREGISTER_DEVICE_HANDLE command */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		mutex_exit(&pwrk->lock);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting for the command to complete */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		return (ETIMEDOUT);
	}
	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/* Log but continue; the handle is invalidated regardless */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	}

	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	PHY_CHANGED(pwp, pptr);
	RESTART_DISCOVERY(pwp);
	pptr->valid_device_id = 0;
	return (0);
}

/*
 * Acknowledge the SAS h/w events that need acknowledgement.
 * This is only needed for first level PHYs.
 */
void
pmcs_ack_events(pmcs_hw_t *pwp)
{
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	struct pmcwork *pwrk;
	pmcs_phy_t *pptr;

	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		pmcs_lock_phy(pptr);
		if (pptr->hw_event_ack == 0) {
			pmcs_unlock_phy(pptr);
			continue;
		}
		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

		if ((ptr == NULL) || (pwrk =
		    pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
			/* Out of resources; retry from the worker thread */
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_unlock_phy(pptr);
			SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK);
			break;
		}

		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SAS_HW_EVENT_ACK));
		msg[1] = LE_32(pwrk->htag);
		msg[2] = LE_32(pptr->hw_event_ack);

		/*
		 * NOTE(review): pwrk->dtype is stored after pwrk->lock is
		 * dropped; sibling paths in this file (e.g. the spinup
		 * release code) set it before the mutex_exit.  Presumably
		 * safe because the message isn't posted until INC_IQ_ENTRY
		 * below -- confirm against the pmcwork lifecycle.
		 */
		mutex_exit(&pwrk->lock);
		pwrk->dtype = pptr->dtype;
		pptr->hw_event_ack = 0;
		COPY_MESSAGE(ptr, msg, 3);
		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
		pmcs_unlock_phy(pptr);
	}
}

/*
 * Load DMA
 */
int
pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg)
{
	ddi_dma_cookie_t *sg;
	pmcs_dmachunk_t *tc;
	pmcs_dmasgl_t *sgl, *prior;
	int seg, tsc;
	uint64_t sgl_addr;

	/*
	 * If we have no data segments, we're done.
	 */
	if (CMD2PKT(sp)->pkt_numcookies == 0) {
		return (0);
	}

	/*
	 * Get the S/G list pointer.
	 */
	sg = CMD2PKT(sp)->pkt_cookies;

	/*
	 * If we only have one dma segment, we can directly address that
	 * data within the Inbound message itself.
	 */
	if (CMD2PKT(sp)->pkt_numcookies == 1) {
		msg[12] = LE_32(DWORD0(sg->dmac_laddress));
		msg[13] = LE_32(DWORD1(sg->dmac_laddress));
		msg[14] = LE_32(sg->dmac_size);
		msg[15] = 0;
		return (0);
	}

	/*
	 * Otherwise, we'll need one or more external S/G list chunks.
	 * Get the first one and its dma address into the Inbound message.
	 */
	mutex_enter(&pwp->dma_lock);
	tc = pwp->dma_freelist;
	if (tc == NULL) {
		/* Freelist exhausted; ask the worker to grow it */
		SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
		mutex_exit(&pwp->dma_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: out of SG lists", __func__);
		return (-1);
	}
	pwp->dma_freelist = tc->nxt;
	mutex_exit(&pwp->dma_lock);

	/* The chunk list on sp->cmd_clist is freed by pmcs_dma_unload */
	tc->nxt = NULL;
	sp->cmd_clist = tc;
	sgl = tc->chunks;
	(void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
	sgl_addr = tc->addr;
	msg[12] = LE_32(DWORD0(sgl_addr));
	msg[13] = LE_32(DWORD1(sgl_addr));
	msg[14] = 0;
	msg[15] = LE_32(PMCS_DMASGL_EXTENSION);

	prior = sgl;
	tsc = 0;

	for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) {
		/*
		 * If the current segment count for this chunk is one less than
		 * the number s/g lists per chunk and we have more than one seg
		 * to go, we need another chunk. Get it, and make sure that the
		 * tail end of the the previous chunk points the new chunk
		 * (if remembering an offset can be called 'pointing to').
		 *
		 * Note that we can store the offset into our command area that
		 * represents the new chunk in the length field of the part
		 * that points the PMC chip at the next chunk- the PMC chip
		 * ignores this field when the EXTENSION bit is set.
		 *
		 * This is required for dma unloads later.
		 */
		if (tsc == (PMCS_SGL_NCHUNKS - 1) &&
		    seg < (CMD2PKT(sp)->pkt_numcookies - 1)) {
			mutex_enter(&pwp->dma_lock);
			tc = pwp->dma_freelist;
			if (tc == NULL) {
				SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
				mutex_exit(&pwp->dma_lock);
				/* Return whatever chunks we already took */
				pmcs_dma_unload(pwp, sp);
				pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
				    "%s: out of SG lists", __func__);
				return (-1);
			}
			pwp->dma_freelist = tc->nxt;
			tc->nxt = sp->cmd_clist;
			mutex_exit(&pwp->dma_lock);

			sp->cmd_clist = tc;
			(void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
			sgl = tc->chunks;
			sgl_addr = tc->addr;
			/* Chain previous chunk's last entry to the new one */
			prior[PMCS_SGL_NCHUNKS-1].sglal =
			    LE_32(DWORD0(sgl_addr));
			prior[PMCS_SGL_NCHUNKS-1].sglah =
			    LE_32(DWORD1(sgl_addr));
			prior[PMCS_SGL_NCHUNKS-1].sglen = 0;
			prior[PMCS_SGL_NCHUNKS-1].flags =
			    LE_32(PMCS_DMASGL_EXTENSION);
			prior = sgl;
			tsc = 0;
		}
		sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress));
		sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress));
		sgl[tsc].sglen = LE_32(sg->dmac_size);
		sgl[tsc++].flags = 0;
		sg++;
	}
	return (0);
}

/*
 * Unload DMA
 */
void
pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp)
{
	pmcs_dmachunk_t *cp;

	/* Return all of this command's S/G chunks to the freelist */
	mutex_enter(&pwp->dma_lock);
	while ((cp = sp->cmd_clist) != NULL) {
		sp->cmd_clist = cp->nxt;
		cp->nxt = pwp->dma_freelist;
		pwp->dma_freelist = cp;
	}
	mutex_exit(&pwp->dma_lock);
}

/*
 * Take a chunk of consistent memory that has just been allocated and inserted
 * into the cip indices and prepare it for DMA chunk usage and add it to the
 * freelist.
 *
 * Called with dma_lock locked (except during attach when it's unnecessary)
 */
void
pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp,
    pmcs_chunk_t *pchunk, unsigned long lim)
{
	unsigned long off, n;
	pmcs_dmachunk_t *np = dcp;
	pmcs_chunk_t *tmp_chunk;

	/* Append the backing chunk so it can be freed at detach time */
	if (pwp->dma_chunklist == NULL) {
		pwp->dma_chunklist = pchunk;
	} else {
		tmp_chunk = pwp->dma_chunklist;
		while (tmp_chunk->next) {
			tmp_chunk = tmp_chunk->next;
		}
		tmp_chunk->next = pchunk;
	}

	/*
	 * Install offsets into chunk lists.
	 */
	for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) {
		np->chunks = (void *)&pchunk->addrp[off];
		np->addr = pchunk->dma_addr + off;
		np->acc_handle = pchunk->acc_handle;
		np->dma_handle = pchunk->dma_handle;
		if ((off + PMCS_SGL_CHUNKSZ) < lim) {
			np = np->nxt;
		}
	}
	/* Splice the new chunk list onto the head of the freelist */
	np->nxt = pwp->dma_freelist;
	pwp->dma_freelist = dcp;
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "added %lu DMA chunks ", n);
}

/*
 * Change the value of the interrupt coalescing timer. This is done currently
 * only for I/O completions. If we're using the "auto clear" feature, it can
 * be turned back on when interrupt coalescing is turned off and must be
 * turned off when the coalescing timer is on.
 * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long
 * as that's true, we don't need to distinguish between them.
 */

void
pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj)
{
	if (adj == DECREASE_TIMER) {
		/* If the timer is already off, nothing to do.
		 */
		if (pwp->io_intr_coal.timer_on == B_FALSE) {
			return;
		}

		pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN;

		if (pwp->io_intr_coal.intr_coal_timer == 0) {
			/* Disable the timer */
			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0);

			/* Re-enable auto clear if it's in use for IODONE */
			if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
				pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
				    pwp->odb_auto_clear);
			}

			/* Reset all coalescing bookkeeping */
			pwp->io_intr_coal.timer_on = B_FALSE;
			/*
			 * NOTE(review): B_FALSE (0) is assigned to
			 * max_io_completions, which other fields here
			 * suggest is a count -- presumably "reset to
			 * zero"; confirm against the field's declaration.
			 */
			pwp->io_intr_coal.max_io_completions = B_FALSE;
			pwp->io_intr_coal.num_intrs = 0;
			pwp->io_intr_coal.int_cleared = B_FALSE;
			pwp->io_intr_coal.num_io_completions = 0;

			DTRACE_PROBE1(pmcs__intr__coalesce__timer__off,
			    pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
		} else {
			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
			    pwp->io_intr_coal.intr_coal_timer);
		}
	} else {
		/*
		 * If the timer isn't on yet, do the setup for it now.
		 */
		if (pwp->io_intr_coal.timer_on == B_FALSE) {
			/* If auto clear is being used, turn it off.
			 */
			if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
				pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
				    (pwp->odb_auto_clear &
				    ~(1 << PMCS_MSIX_IODONE)));
			}

			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL,
			    (1 << PMCS_MSIX_IODONE));
			pwp->io_intr_coal.timer_on = B_TRUE;
			pwp->io_intr_coal.intr_coal_timer =
			    PMCS_COAL_TIMER_GRAN;

			DTRACE_PROBE1(pmcs__intr__coalesce__timer__on,
			    pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
		} else {
			pwp->io_intr_coal.intr_coal_timer +=
			    PMCS_COAL_TIMER_GRAN;
		}

		/* Clamp to the maximum supported timer value */
		if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) {
			pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER;
		}

		pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
		    pwp->io_intr_coal.intr_coal_timer);
	}

	/*
	 * Adjust the interrupt threshold based on the current timer value
	 */
	pwp->io_intr_coal.intr_threshold =
	    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
	    (pwp->io_intr_coal.intr_latency +
	    (pwp->io_intr_coal.intr_coal_timer * 1000)));
}

/*
 * Register Access functions
 */

/* Read the inbound queue consumer index for qnum from consistent memory. */
uint32_t
pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum)
{
	uint32_t iqci;

	/* Sync the DMA memory before reading the index */
	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: ddi_dma_sync failed?", __func__);
	}

	iqci = LE_32(
	    ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]);

	return (iqci);
}

/* Read the outbound queue producer index for qnum from consistent memory. */
uint32_t
pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum)
{
	uint32_t oqpi;

	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: ddi_dma_sync failed?", __func__);
	}

	oqpi = LE_32(
	    ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]);

	return (oqpi);
}

/* Read a GSM register through the AXI translation window(s). */
uint32_t
pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint8_t hi, uint32_t off) 6514 { 6515 uint32_t rv, newaxil, oldaxil, oldaxih; 6516 6517 newaxil = off & ~GSM_BASE_MASK; 6518 off &= GSM_BASE_MASK; 6519 mutex_enter(&pwp->axil_lock); 6520 oldaxil = ddi_get32(pwp->top_acc_handle, 6521 &pwp->top_regs[PMCS_AXI_TRANS >> 2]); 6522 ddi_put32(pwp->top_acc_handle, 6523 &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil); 6524 drv_usecwait(10); 6525 if (ddi_get32(pwp->top_acc_handle, 6526 &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) { 6527 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6528 "AXIL register update failed"); 6529 } 6530 if (hi) { 6531 oldaxih = ddi_get32(pwp->top_acc_handle, 6532 &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]); 6533 ddi_put32(pwp->top_acc_handle, 6534 &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], hi); 6535 drv_usecwait(10); 6536 if (ddi_get32(pwp->top_acc_handle, 6537 &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != hi) { 6538 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6539 "AXIH register update failed"); 6540 } 6541 } 6542 rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]); 6543 if (hi) { 6544 ddi_put32(pwp->top_acc_handle, 6545 &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], oldaxih); 6546 drv_usecwait(10); 6547 if (ddi_get32(pwp->top_acc_handle, 6548 &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != oldaxih) { 6549 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6550 "AXIH register restore failed"); 6551 } 6552 } 6553 ddi_put32(pwp->top_acc_handle, 6554 &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil); 6555 drv_usecwait(10); 6556 if (ddi_get32(pwp->top_acc_handle, 6557 &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) { 6558 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6559 "AXIL register restore failed"); 6560 } 6561 mutex_exit(&pwp->axil_lock); 6562 return (rv); 6563 } 6564 6565 void 6566 pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6567 { 6568 uint32_t newaxil, oldaxil; 6569 6570 newaxil = off & ~GSM_BASE_MASK; 6571 off &= GSM_BASE_MASK; 6572 mutex_enter(&pwp->axil_lock); 
6573 oldaxil = ddi_get32(pwp->top_acc_handle, 6574 &pwp->top_regs[PMCS_AXI_TRANS >> 2]); 6575 ddi_put32(pwp->top_acc_handle, 6576 &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil); 6577 drv_usecwait(10); 6578 if (ddi_get32(pwp->top_acc_handle, 6579 &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) { 6580 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6581 "AXIL register update failed"); 6582 } 6583 ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val); 6584 ddi_put32(pwp->top_acc_handle, 6585 &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil); 6586 drv_usecwait(10); 6587 if (ddi_get32(pwp->top_acc_handle, 6588 &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) { 6589 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6590 "AXIL register restore failed"); 6591 } 6592 mutex_exit(&pwp->axil_lock); 6593 } 6594 6595 uint32_t 6596 pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off) 6597 { 6598 switch (off) { 6599 case PMCS_SPC_RESET: 6600 case PMCS_SPC_BOOT_STRAP: 6601 case PMCS_SPC_DEVICE_ID: 6602 case PMCS_DEVICE_REVISION: 6603 off = pmcs_rd_gsm_reg(pwp, 0, off); 6604 break; 6605 default: 6606 off = ddi_get32(pwp->top_acc_handle, 6607 &pwp->top_regs[off >> 2]); 6608 break; 6609 } 6610 return (off); 6611 } 6612 6613 void 6614 pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6615 { 6616 switch (off) { 6617 case PMCS_SPC_RESET: 6618 case PMCS_DEVICE_REVISION: 6619 pmcs_wr_gsm_reg(pwp, off, val); 6620 break; 6621 default: 6622 ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val); 6623 break; 6624 } 6625 } 6626 6627 uint32_t 6628 pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off) 6629 { 6630 return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2])); 6631 } 6632 6633 uint32_t 6634 pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off) 6635 { 6636 return (ddi_get32(pwp->mpi_acc_handle, 6637 &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2])); 6638 } 6639 6640 uint32_t 6641 pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off) 6642 { 6643 return (ddi_get32(pwp->mpi_acc_handle, 6644 
&pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2])); 6645 } 6646 6647 uint32_t 6648 pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off) 6649 { 6650 return (ddi_get32(pwp->mpi_acc_handle, 6651 &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2])); 6652 } 6653 6654 uint32_t 6655 pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off) 6656 { 6657 return (ddi_get32(pwp->mpi_acc_handle, 6658 &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2])); 6659 } 6660 6661 uint32_t 6662 pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum) 6663 { 6664 return (ddi_get32(pwp->mpi_acc_handle, 6665 &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2])); 6666 } 6667 6668 uint32_t 6669 pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum) 6670 { 6671 return (ddi_get32(pwp->mpi_acc_handle, 6672 &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2])); 6673 } 6674 6675 void 6676 pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6677 { 6678 ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val); 6679 } 6680 6681 void 6682 pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6683 { 6684 ddi_put32(pwp->mpi_acc_handle, 6685 &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val)); 6686 } 6687 6688 void 6689 pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6690 { 6691 ddi_put32(pwp->mpi_acc_handle, 6692 &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val); 6693 } 6694 6695 void 6696 pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6697 { 6698 ddi_put32(pwp->mpi_acc_handle, 6699 &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val); 6700 } 6701 6702 void 6703 pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val) 6704 { 6705 ddi_put32(pwp->mpi_acc_handle, 6706 &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val); 6707 } 6708 6709 void 6710 pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6711 { 6712 ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val; 6713 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) != 6714 DDI_SUCCESS) { 6715 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 
6716 "%s: ddi_dma_sync failed?", __func__); 6717 } 6718 } 6719 6720 void 6721 pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6722 { 6723 ddi_put32(pwp->mpi_acc_handle, 6724 &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val); 6725 } 6726 6727 void 6728 pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6729 { 6730 ddi_put32(pwp->mpi_acc_handle, 6731 &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val); 6732 } 6733 6734 void 6735 pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val) 6736 { 6737 ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val; 6738 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) != 6739 DDI_SUCCESS) { 6740 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 6741 "%s: ddi_dma_sync failed?", __func__); 6742 } 6743 } 6744 6745 /* 6746 * Check the status value of an outbound IOMB and report anything bad 6747 */ 6748 6749 void 6750 pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb) 6751 { 6752 uint16_t opcode; 6753 int offset; 6754 6755 if (iomb == NULL) { 6756 return; 6757 } 6758 6759 opcode = LE_32(iomb[0]) & 0xfff; 6760 6761 switch (opcode) { 6762 /* 6763 * The following have no status field, so ignore them 6764 */ 6765 case PMCOUT_ECHO: 6766 case PMCOUT_SAS_HW_EVENT: 6767 case PMCOUT_GET_DEVICE_HANDLE: 6768 case PMCOUT_SATA_EVENT: 6769 case PMCOUT_SSP_EVENT: 6770 case PMCOUT_DEVICE_HANDLE_ARRIVED: 6771 case PMCOUT_SMP_REQUEST_RECEIVED: 6772 case PMCOUT_GPIO: 6773 case PMCOUT_GPIO_EVENT: 6774 case PMCOUT_GET_TIME_STAMP: 6775 case PMCOUT_SKIP_ENTRIES: 6776 case PMCOUT_GET_NVMD_DATA: /* Actually lower 16 bits of word 3 */ 6777 case PMCOUT_SET_NVMD_DATA: /* but ignore - we don't use these */ 6778 case PMCOUT_DEVICE_HANDLE_REMOVED: 6779 case PMCOUT_SSP_REQUEST_RECEIVED: 6780 return; 6781 6782 case PMCOUT_GENERAL_EVENT: 6783 offset = 1; 6784 break; 6785 6786 case PMCOUT_SSP_COMPLETION: 6787 case PMCOUT_SMP_COMPLETION: 6788 case PMCOUT_DEVICE_REGISTRATION: 6789 case PMCOUT_DEREGISTER_DEVICE_HANDLE: 6790 case 
PMCOUT_SATA_COMPLETION: 6791 case PMCOUT_DEVICE_INFO: 6792 case PMCOUT_FW_FLASH_UPDATE: 6793 case PMCOUT_SSP_ABORT: 6794 case PMCOUT_SATA_ABORT: 6795 case PMCOUT_SAS_DIAG_MODE_START_END: 6796 case PMCOUT_SAS_HW_EVENT_ACK_ACK: 6797 case PMCOUT_SMP_ABORT: 6798 case PMCOUT_SET_DEVICE_STATE: 6799 case PMCOUT_GET_DEVICE_STATE: 6800 case PMCOUT_SET_DEVICE_INFO: 6801 offset = 2; 6802 break; 6803 6804 case PMCOUT_LOCAL_PHY_CONTROL: 6805 case PMCOUT_SAS_DIAG_EXECUTE: 6806 case PMCOUT_PORT_CONTROL: 6807 offset = 3; 6808 break; 6809 6810 case PMCOUT_GET_INFO: 6811 case PMCOUT_GET_VPD: 6812 case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT: 6813 case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT: 6814 case PMCOUT_SET_VPD: 6815 case PMCOUT_TWI: 6816 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 6817 "Got response for deprecated opcode", iomb); 6818 return; 6819 6820 default: 6821 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 6822 "Got response for unknown opcode", iomb); 6823 return; 6824 } 6825 6826 if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) { 6827 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 6828 "bad status on TAG_TYPE_NONE command", iomb); 6829 } 6830 } 6831 6832 /* 6833 * Called with statlock held 6834 */ 6835 void 6836 pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp) 6837 { 6838 _NOTE(ARGUNUSED(pwp)); 6839 6840 ASSERT(mutex_owned(&xp->statlock)); 6841 6842 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: Device 0x%p is gone.", 6843 __func__, (void *)xp); 6844 6845 /* 6846 * Clear the dip now. This keeps pmcs_remove_device from attempting 6847 * to call us on the same device while we're still flushing queues. 6848 * The only side effect is we can no longer update SM-HBA properties, 6849 * but this device is going away anyway, so no matter. 
 */
	xp->dip = NULL;
	xp->smpd = NULL;
	xp->special_running = 0;
	xp->recovering = 0;
	xp->recover_wait = 0;
	xp->draining = 0;
	xp->new = 0;
	xp->assigned = 0;
	xp->dev_state = 0;
	xp->tagmap = 0;
	xp->dev_gone = 1;
	xp->event_recovery = 0;
	xp->dtype = NOTHING;
	xp->wq_recovery_tail = NULL;
	/* Don't clear xp->phy */
	/* Don't clear xp->actv_cnt */
	/* Don't clear xp->actv_pkts */

	/*
	 * Flush all target queues
	 */
	pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
}

/*
 * Log a descriptive message for an SMP response function result code and
 * return the result unchanged to the caller.
 */
static int
pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
{
	int result = srf->srf_result;

	switch (result) {
	case SMP_RES_UNKNOWN_FUNCTION:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Unknown SMP Function(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_FUNCTION_FAILED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: SMP Function Failed(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Invalid Request Frame Length(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Incomplete Descriptor List(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_PHY_DOES_NOT_EXIST:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: PHY does not exist(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_PHY_VACANT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: PHY Vacant(0x%x)",
		    __func__, result);
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: (0x%x)",
		    __func__, result);
		break;
	}

	return (result);
}

/*
 * Do all the repetitive stuff necessary to setup for DMA
 *
 * pwp: Used for dip
 * dma_attr: ddi_dma_attr_t to use for the mapping
 * acch: ddi_acc_handle_t to use for the mapping
 * dmah: ddi_dma_handle_t to use
 * length: Amount of memory for mapping
 * kvap: Pointer filled in with kernel virtual address on successful return
 * dma_addr: Pointer filled in with DMA address on successful return
 *
 * Returns B_TRUE on success.  On any failure, partially-acquired resources
 * are released and *acch/*dmah are reset to NULL.
 */
boolean_t
pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
    ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
{
	dev_info_t *dip = pwp->dip;
	ddi_dma_cookie_t cookie;
	size_t real_length;
	uint_t ddma_flag = DDI_DMA_CONSISTENT;
	uint_t ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
	uint_t cookie_cnt;
	ddi_device_acc_attr_t mattr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
		DDI_DEFAULT_ACC
	};

	*acch = NULL;
	*dmah = NULL;

	if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to allocate DMA handle");
		return (B_FALSE);
	}

	if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
	    NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to allocate DMA mem");
		ddi_dma_free_handle(dmah);
		*dmah = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
	    ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
	    != DDI_DMA_MAPPED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
		ddi_dma_free_handle(dmah);
		ddi_dma_mem_free(acch);
		*dmah = NULL;
		*acch = NULL;
		return (B_FALSE);
	}

	/* Callers require a single-cookie (contiguous) mapping */
	if (cookie_cnt != 1) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Multiple cookies");
		if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Condition "
			    "failed at %s():%d", __func__, __LINE__);
		}
		ddi_dma_free_handle(dmah);
		ddi_dma_mem_free(acch);
		*dmah = NULL;
		*acch = NULL;
		return (B_FALSE);
	}

	*dma_addr = cookie.dmac_laddress;

	return (B_TRUE);
}

/*
 * Flush requested queues for a particular target. Called with statlock held
 */
void
pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
{
	pmcs_cmd_t *sp, *sp_next;
	pmcwork_t *pwrk;

	ASSERT(pwp != NULL);
	ASSERT(tgt != NULL);

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, tgt,
	    "%s: Flushing queues (%d) for target 0x%p", __func__,
	    queues, (void *)tgt);

	/*
	 * Commands on the wait queue (or the special queue below) don't have
	 * work structures associated with them.
	 */
	if (queues & PMCS_TGT_WAIT_QUEUE) {
		mutex_enter(&tgt->wqlock);
		while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
			STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, tgt,
			    "%s: Removing cmd 0x%p from wq for target 0x%p",
			    __func__, (void *)sp, (void *)tgt);
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			/* Drop wqlock while completing, then reacquire */
			mutex_exit(&tgt->wqlock);
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&tgt->wqlock);
		}
		mutex_exit(&tgt->wqlock);
	}

	/*
	 * Commands on the active queue will have work structures associated
	 * with them.
	 */
	if (queues & PMCS_TGT_ACTIVE_QUEUE) {
		/* statlock is dropped here and reacquired before returning */
		mutex_exit(&tgt->statlock);
		mutex_enter(&tgt->aqlock);
		sp = STAILQ_FIRST(&tgt->aq);
		while (sp) {
			sp_next = STAILQ_NEXT(sp, cmd_next);
			pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);

			/*
			 * If we don't find a work structure, it's because
			 * the command is already complete. If so, move on
			 * to the next one.
			 */
			if (pwrk == NULL) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
				    "%s: Not removing cmd 0x%p (htag 0x%x) "
				    "from aq", __func__, (void *)sp,
				    sp->cmd_tag);
				sp = sp_next;
				continue;
			}

			STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq for "
			    "target 0x%p", __func__, (void *)sp, sp->cmd_tag,
			    (void *)tgt);
			mutex_exit(&tgt->aqlock);
			/*
			 * Mark the work structure as dead and complete it
			 */
			pwrk->dead = 1;
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&tgt->aqlock);
			sp = sp_next;
		}
		mutex_exit(&tgt->aqlock);
		mutex_enter(&tgt->statlock);
	}

	if (queues & PMCS_TGT_SPECIAL_QUEUE) {
		while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
			STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
			    "%s: Removing cmd 0x%p from sq for target 0x%p",
			    __func__, (void *)sp, (void *)tgt);
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			mutex_exit(&pwp->cq_lock);
		}
	}

	if (queues == PMCS_TGT_ALL_QUEUES) {
		/* Also reap non-I/O commands not tied to a pmcs_cmd_t */
		mutex_exit(&tgt->statlock);
		(void) pmcs_flush_nonio_cmds(pwp);
		mutex_enter(&tgt->statlock);
	}
}

/*
 * Clean up work structures with no associated pmcs_cmd_t struct
 */
void
pmcs_flush_nonio_cmds(pmcs_hw_t *pwp)
{
	int i;
	pmcwork_t *p;

	/* Walk the entire work structure array */
	for (i = 0; i < pwp->max_cmd; i++) {
		p = &pwp->work[i];
		mutex_enter(&p->lock);
		if (p->htag & PMCS_TAG_NONIO_CMD) {
			if (!PMCS_COMMAND_ACTIVE(p) || PMCS_COMMAND_DONE(p)) {
				mutex_exit(&p->lock);
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, p->phy, p->xp,
			    "%s: Completing non-io cmd with HTAG 0x%x",
			    __func__, p->htag);
			/* Completion path is responsible for p->lock */
			pmcs_complete_work_impl(pwp, p, NULL, 0);
		} else {
			mutex_exit(&p->lock);
		}
	}
}

/*
 * Dispatch completion of a work structure based on its tag type.
 * Called with the work structure's lock held; the taken path (callback,
 * waiter wakeup, or pmcs_pwork) is responsible for releasing it.
 */
void
pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
    size_t amt)
{
	switch (PMCS_TAG_TYPE(pwrk->htag)) {
	case PMCS_TAG_TYPE_CBACK:
	{
		pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr;
		(*callback)(pwp, pwrk, iomb);
		break;
	}
	case PMCS_TAG_TYPE_WAIT:
		/* Copy the IOMB out for the waiter, then wake it */
		if (pwrk->arg && iomb && amt) {
			(void) memcpy(pwrk->arg, iomb, amt);
		}
		cv_signal(&pwrk->sleep_cv);
		mutex_exit(&pwrk->lock);
		break;
	case PMCS_TAG_TYPE_NONE:
#ifdef DEBUG
		pmcs_check_iomb_status(pwp, iomb);
#endif
		pmcs_pwork(pwp, pwrk);
		break;
	default:
		/*
		 * We will leak a structure here if we don't know
		 * what happened
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Unknown PMCS_TAG_TYPE (%x)",
		    __func__, PMCS_TAG_TYPE(pwrk->htag));
		break;
	}
}

/*
 * Determine if iport still has targets. During detach(9E), if SCSA is
 * successful in its guarantee of tran_tgt_free(9E) before detach(9E),
 * this should always return B_FALSE.
 */
boolean_t
pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport)
{
	pmcs_xscsi_t *xp;
	int i;

	mutex_enter(&pwp->lock);

	if (!pwp->targets || !pwp->max_dev) {
		mutex_exit(&pwp->lock);
		return (B_FALSE);
	}

	/* Scan the target array for any target whose PHY uses this iport */
	for (i = 0; i < pwp->max_dev; i++) {
		xp = pwp->targets[i];
		if ((xp == NULL) || (xp->phy == NULL) ||
		    (xp->phy->iport != iport)) {
			continue;
		}

		mutex_exit(&pwp->lock);
		return (B_TRUE);
	}

	mutex_exit(&pwp->lock);
	return (B_FALSE);
}

/*
 * Tear down and free a target's soft state.  Called with softstate lock
 * held.  Does nothing beyond logging if the target's iport can't be found.
 */
void
pmcs_destroy_target(pmcs_xscsi_t *target)
{
	pmcs_hw_t *pwp = target->pwp;
	pmcs_iport_t *iport;

	ASSERT(pwp);
	ASSERT(mutex_owned(&pwp->lock));

	if (!target->ua) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: target %p iport address is null",
		    __func__, (void *)target);
	}

	iport = pmcs_get_iport_by_ua(pwp, target->ua);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: no iport associated with tgt(0x%p)",
		    __func__, (void *)target);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target,
	    "%s: free target %p", __func__, (void *)target);
	if (target->ua) {
		strfree(target->ua);
	}

	mutex_destroy(&target->wqlock);
	mutex_destroy(&target->aqlock);
	mutex_destroy(&target->statlock);
	cv_destroy(&target->reset_cv);
	cv_destroy(&target->abort_cv);
	ddi_soft_state_bystr_fini(&target->lun_sstate);
	ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
	/* pmcs_get_iport_by_ua took a hold on the iport; release it */
	pmcs_rele_iport(iport);
}

/*
 * pmcs_lock_phy_impl
 *
 * This function is what does the actual work for pmcs_lock_phy. It will
 * lock all PHYs from phyp down in a top-down fashion.
 *
 * Locking notes:
 * 1. level starts from 0 for the PHY ("parent") that's passed in. It is
 *    not a reflection of the actual level of the PHY in the SAS topology.
 * 2. If parent is an expander, then parent is locked along with all its
 *    descendents.
 * 3. Expander subsidiary PHYs at level 0 are not locked. It is the
 *    responsibility of the caller to individually lock expander subsidiary
 *    PHYs at level 0 if necessary.
 * 4. Siblings at level 0 are not traversed due to the possibility that we're
 *    locking a PHY on the dead list. The siblings could be pointing to
 *    invalid PHYs. We don't lock siblings at level 0 anyway.
 */
static void
pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *tphyp;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Start walking the PHYs.
	 */
	tphyp = phyp;
	while (tphyp) {
		/*
		 * If we're at the top level, only lock ourselves. For anything
		 * at level > 0, traverse children while locking everything.
		 */
		if ((level > 0) || (tphyp == phyp)) {
			pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, tphyp,
			    NULL, "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)tphyp, (void *)tphyp->parent,
			    tphyp->path, level);
			mutex_enter(&tphyp->phy_lock);

			if (tphyp->children) {
				pmcs_lock_phy_impl(tphyp->children, level + 1);
			}
		}

		/* See note 4 above: no sibling traversal at level 0 */
		if (level == 0) {
			return;
		}

		tphyp = tphyp->sibling;
	}
}

/*
 * pmcs_lock_phy
 *
 * This function is responsible for locking a PHY and all its descendents
 */
void
pmcs_lock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
	char *callername = NULL;
	ulong_t off;

	ASSERT(phyp != NULL);

	/* In DEBUG kernels, log the caller's symbol for lock debugging */
	callername = modgetsymname((uintptr_t)caller(), &off);

	if (callername == NULL) {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: unknown", __func__,
		    (void *)phyp, phyp->path);
	} else {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
		    (void *)phyp, phyp->path, callername, off);
	}
#else
	pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
	    "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
#endif
	pmcs_lock_phy_impl(phyp, 0);
}

/*
 * pmcs_unlock_phy_impl
 *
 * Unlock all PHYs from phyp down in a bottom-up fashion.
 */
static void
pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *phy_next;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Recurse down to the bottom PHYs
	 */
	if (level == 0) {
		if (phyp->children) {
			pmcs_unlock_phy_impl(phyp->children, level + 1);
		}
	} else {
		phy_next = phyp;
		while (phy_next) {
			if (phy_next->children) {
				pmcs_unlock_phy_impl(phy_next->children,
				    level + 1);
			}
			phy_next = phy_next->sibling;
		}
	}

	/*
	 * Iterate through PHYs unlocking all at level > 0 as well the top PHY
	 */
	phy_next = phyp;
	while (phy_next) {
		if ((level > 0) || (phy_next == phyp)) {
			pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
			    phy_next, NULL,
			    "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)phy_next,
			    (void *)phy_next->parent, phy_next->path, level);
			mutex_exit(&phy_next->phy_lock);
		}

		/* Mirror of pmcs_lock_phy_impl: no siblings at level 0 */
		if (level == 0) {
			return;
		}

		phy_next = phy_next->sibling;
	}
}

/*
 * pmcs_unlock_phy
 *
 * Unlock a PHY and all its descendents
 */
void
pmcs_unlock_phy(pmcs_phy_t *phyp)
{
#ifdef DEBUG
	char *callername = NULL;
	ulong_t off;

	ASSERT(phyp != NULL);

	/* In DEBUG kernels, log the caller's symbol for lock debugging */
	callername = modgetsymname((uintptr_t)caller(), &off);

	if (callername == NULL) {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: unknown", __func__,
		    (void *)phyp, phyp->path);
	} else {
		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
		    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
		    (void *)phyp, phyp->path, callername, off);
	}
#else
	pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
	    "%s: PHY 0x%p path %s", __func__, (void *)phyp,
	    phyp->path);
#endif
	pmcs_unlock_phy_impl(phyp, 0);
}

/*
 * pmcs_get_root_phy
 *
 * For a given phy pointer return its root phy.
 * This function must only be called during discovery in order to ensure that
 * the chain of PHYs from phyp up to the root PHY doesn't change.
 */
pmcs_phy_t *
pmcs_get_root_phy(pmcs_phy_t *phyp)
{
	ASSERT(phyp);

	/* Walk up the parent chain until a root PHY is reached */
	while (phyp) {
		if (IS_ROOT_PHY(phyp)) {
			break;
		}
		phyp = phyp->parent;
	}

	return (phyp);
}

/*
 * pmcs_free_dma_chunklist
 *
 * Free DMA S/G chunk list: unbind/free each chunk's DMA resources (when
 * present) and free the chunk structures themselves.
 */
void
pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
{
	pmcs_chunk_t *pchunk;

	while (pwp->dma_chunklist) {
		pchunk = pwp->dma_chunklist;
		pwp->dma_chunklist = pwp->dma_chunklist->next;
		if (pchunk->dma_handle) {
			if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
			    DDI_SUCCESS) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "Condition failed at %s():%d",
				    __func__, __LINE__);
			}
			ddi_dma_free_handle(&pchunk->dma_handle);
			ddi_dma_mem_free(&pchunk->acc_handle);
		}
		kmem_free(pchunk, sizeof (pmcs_chunk_t));
	}
}

/*
 * kmem cache constructor for PHY structures: initialize the PHY lock and
 * the abort-all CV.  'arg' is the pmcs_hw_t passed at cache creation.
 */
/*ARGSUSED2*/
int
pmcs_phy_constructor(void *buf, void *arg, int kmflags)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
	pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

	mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
	return (0);
}

/*
 * kmem cache destructor for PHY structures: tear down what the
 * constructor created.
 */
/*ARGSUSED1*/
void
pmcs_phy_destructor(void *buf, void *arg)
{
	pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

	cv_destroy(&phyp->abort_all_cv);
	mutex_destroy(&phyp->phy_lock);
}

/*
 * Free all PHYs from the kmem_cache starting at phyp as well as everything
 * on the dead_phys list.
 *
 * NOTE: This function does not free root PHYs as they are not allocated
 * from the kmem_cache.
 *
 * No PHY locks are acquired as this should only be called during DDI_DETACH
 * or soft reset (while pmcs interrupts are disabled).
 */
void
pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	pmcs_phy_t *tphyp, *nphyp;

	if (phyp == NULL) {
		return;
	}

	/* Walk the sibling list, recursing into children first */
	tphyp = phyp;
	while (tphyp) {
		nphyp = tphyp->sibling;

		if (tphyp->children) {
			pmcs_free_all_phys(pwp, tphyp->children);
			tphyp->children = NULL;
		}
		if (!IS_ROOT_PHY(tphyp)) {
			kmem_cache_free(pwp->phy_cache, tphyp);
		}

		tphyp = nphyp;
	}

	/* Now drain the dead PHY list as well */
	mutex_enter(&pwp->dead_phylist_lock);
	tphyp = pwp->dead_phys;
	while (tphyp) {
		nphyp = tphyp->dead_next;
		kmem_cache_free(pwp->phy_cache, tphyp);
		tphyp = nphyp;
	}
	pwp->dead_phys = NULL;
	mutex_exit(&pwp->dead_phylist_lock);
}

/*
 * Free a list of PHYs linked together by the sibling pointer back to the
 * kmem cache from whence they came. This function does not recurse, so the
 * caller must ensure there are no children.
 */
void
pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	pmcs_phy_t *next_phy;

	while (phyp) {
		next_phy = phyp->sibling;
		ASSERT(!mutex_owned(&phyp->phy_lock));
		kmem_cache_free(pwp->phy_cache, phyp);
		phyp = next_phy;
	}
}

/*
 * Make a copy of an existing PHY structure. This is used primarily in
 * discovery to compare the contents of an existing PHY with what gets
 * reported back by an expander.
 *
 * This function must not be called from any context where sleeping is
 * not possible.
 *
 * The new PHY is returned unlocked.
 */
static pmcs_phy_t *
pmcs_clone_phy(pmcs_phy_t *orig_phy)
{
	pmcs_phy_t *local;

	local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);

	/*
	 * Go ahead and just copy everything...
	 */
	*local = *orig_phy;

	/*
	 * But the following must be set appropriately for this copy
	 */
	local->sibling = NULL;
	local->children = NULL;
	mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(orig_phy->pwp->intr_pri));

	return (local);
}

/*
 * FMA: return the fault status of a register access handle, or
 * DDI_FAILURE if the handle was never set up.
 */
int
pmcs_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}
	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}

/*
 * FMA: return the fault status of a DMA handle, or DDI_FAILURE if the
 * handle was never set up.
 */
int
pmcs_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}


/*
 * Post an FM ereport of class "<DDI_FM_DEVICE>.<detail>" if ereports are
 * enabled for this instance.
 */
void
pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
		ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

/*
 * Check every access and DMA handle the driver allocated in attach.
 * Returns 0 if all handles are fault-free, 1 if any handle has an error.
 */
int
pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
{
	pmcs_chunk_t *pchunk;
	int i;

	/* check all acc & dma handles allocated in attach */
	if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
		goto check_failed;
	}

	/* Inbound queue handles */
	for (i = 0; i < PMCS_NIQ; i++) {
		if ((pmcs_check_dma_handle(
		    pwp->iqp_handles[i]) != DDI_SUCCESS) ||
		    (pmcs_check_acc_handle(
		    pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
			goto check_failed;
		}
	}

	/* Outbound queue handles */
	for (i = 0; i < PMCS_NOQ; i++) {
		if ((pmcs_check_dma_handle(
		    pwp->oqp_handles[i]) != DDI_SUCCESS) ||
		    (pmcs_check_acc_handle(
		    pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
			goto check_failed;
		}
	}

	if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
		goto check_failed;
	}

	/* Firmware log and register dump areas are optional */
	if (pwp->fwlog &&
	    ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
		goto check_failed;
	}

	if (pwp->regdump_hndl && pwp->regdump_acchdl &&
	    ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
	    (pmcs_check_acc_handle(pwp->regdump_acchdl)
	    != DDI_SUCCESS))) {
		goto check_failed;
	}


	pchunk = pwp->dma_chunklist;
	while (pchunk) {
		if ((pmcs_check_acc_handle(pchunk->acc_handle)
		    != DDI_SUCCESS) ||
		    (pmcs_check_dma_handle(pchunk->dma_handle)
		    != DDI_SUCCESS)) {
			goto check_failed;
		}
		pchunk = pchunk->next;
	}

	return (0);

check_failed:

	return (1);
}

/*
 * pmcs_handle_dead_phys
 *
 * If the PHY has no outstanding work associated with it, remove it from
 * the dead PHY list and free it.
 *
 * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
 * This keeps routines that need to submit work to the chip from having to
 * hold PHY locks to ensure that PHYs don't disappear while they do their work.
 */
void
pmcs_handle_dead_phys(pmcs_hw_t *pwp)
{
	pmcs_phy_t *phyp, *nphyp, *pphyp;

	mutex_enter(&pwp->lock);
	mutex_enter(&pwp->config_lock);

	if (pwp->configuring | pwp->ds_err_recovering) {
		mutex_exit(&pwp->config_lock);
		mutex_exit(&pwp->lock);
		return;
	}

	/*
	 * Check every PHY in the dead PHY list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	phyp = pwp->dead_phys;
	pphyp = NULL; /* Set previous PHY to NULL */

	while (phyp != NULL) {
		pmcs_lock_phy(phyp);
		ASSERT(phyp->dead);

		nphyp = phyp->dead_next;

		/*
		 * Check for outstanding work
		 */
		if (phyp->ref_count > 0) {
			pmcs_unlock_phy(phyp);
			pphyp = phyp; /* This PHY becomes "previous" */
		} else if (phyp->target) {
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, phyp->target,
			    "%s: Not freeing PHY 0x%p: target 0x%p is not free",
			    __func__, (void *)phyp, (void *)phyp->target);
			pphyp = phyp;
		} else {
			/*
			 * No outstanding work or target references. Remove it
			 * from the list and free it
			 */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
			    "%s: Freeing inactive dead PHY 0x%p @ %s "
			    "target = 0x%p", __func__, (void *)phyp,
			    phyp->path, (void *)phyp->target);
			/*
			 * If pphyp is NULL, then phyp was the head of the list,
			 * so just reset the head to nphyp. Otherwise, the
			 * previous PHY will now point to nphyp (the next PHY)
			 */
			if (pphyp == NULL) {
				pwp->dead_phys = nphyp;
			} else {
				pphyp->dead_next = nphyp;
			}
			/*
			 * If the target still points to this PHY, remove
			 * that linkage now.
			 */
			if (phyp->target) {
				mutex_enter(&phyp->target->statlock);
				if (phyp->target->phy == phyp) {
					phyp->target->phy = NULL;
				}
				mutex_exit(&phyp->target->statlock);
			}
			pmcs_unlock_phy(phyp);
			kmem_cache_free(pwp->phy_cache, phyp);
		}

		phyp = nphyp;
	}

	mutex_exit(&pwp->dead_phylist_lock);
	mutex_exit(&pwp->config_lock);
	mutex_exit(&pwp->lock);
}

/* Bump the outstanding-work reference count on a PHY */
void
pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
{
	atomic_inc_32(&phyp->ref_count);
}

/* Drop the outstanding-work reference count on a PHY */
void
pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
{
	ASSERT(phyp->ref_count != 0);
	atomic_dec_32(&phyp->ref_count);
}

/*
 * pmcs_reap_dead_phy
 *
 * This function is called from pmcs_new_tport when we have a PHY
 * without a target pointer. It's possible in that case that this PHY
 * may have a "brother" on the dead_phys list. That is, it may be the same as
 * this one but with a different root PHY number (e.g. pp05 vs. pp04). If
 * that's the case, update the dead PHY and this new PHY. If that's not the
 * case, we should get a tran_tgt_init on this after it's reported to SCSA.
 *
 * Called with PHY locked.
 */
static void
pmcs_reap_dead_phy(pmcs_phy_t *phyp)
{
	pmcs_hw_t *pwp = phyp->pwp;
	pmcs_phy_t *ctmp;
	pmcs_iport_t *iport_cmp;

	ASSERT(mutex_owned(&phyp->phy_lock));

	/*
	 * Check the dead PHYs list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	ctmp = pwp->dead_phys;
	while (ctmp) {
		/*
		 * If the iport is NULL, compare against last_iport.
7836 */ 7837 if (ctmp->iport) { 7838 iport_cmp = ctmp->iport; 7839 } else { 7840 iport_cmp = ctmp->last_iport; 7841 } 7842 7843 if ((iport_cmp != phyp->iport) || 7844 (memcmp((void *)&ctmp->sas_address[0], 7845 (void *)&phyp->sas_address[0], 8))) { 7846 ctmp = ctmp->dead_next; 7847 continue; 7848 } 7849 7850 /* 7851 * Same SAS address on same iport. Now check to see if 7852 * the PHY path is the same with the possible exception 7853 * of the root PHY number. 7854 * The "5" is the string length of "pp00." 7855 */ 7856 if ((strnlen(phyp->path, 5) >= 5) && 7857 (strnlen(ctmp->path, 5) >= 5)) { 7858 if (memcmp((void *)&phyp->path[5], 7859 (void *)&ctmp->path[5], 7860 strnlen(phyp->path, 32) - 5) == 0) { 7861 break; 7862 } 7863 } 7864 7865 ctmp = ctmp->dead_next; 7866 } 7867 mutex_exit(&pwp->dead_phylist_lock); 7868 7869 /* 7870 * Found a match. Remove the target linkage and drop the 7871 * ref count on the old PHY. Then, increment the ref count 7872 * on the new PHY to compensate. 7873 */ 7874 if (ctmp) { 7875 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, 7876 "%s: Found match in dead PHY list (0x%p) for new PHY %s", 7877 __func__, (void *)ctmp, phyp->path); 7878 /* 7879 * If there is a pointer to the target in the dead PHY, move 7880 * all reference counts to the new PHY. 
7881 */ 7882 if (ctmp->target) { 7883 mutex_enter(&ctmp->target->statlock); 7884 phyp->target = ctmp->target; 7885 7886 while (ctmp->ref_count != 0) { 7887 pmcs_inc_phy_ref_count(phyp); 7888 pmcs_dec_phy_ref_count(ctmp); 7889 } 7890 /* 7891 * Update the target's linkage as well 7892 */ 7893 phyp->target->phy = phyp; 7894 phyp->target->dtype = phyp->dtype; 7895 ctmp->target = NULL; 7896 mutex_exit(&phyp->target->statlock); 7897 } 7898 } 7899 } 7900 7901 /* 7902 * Called with iport lock held 7903 */ 7904 void 7905 pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp) 7906 { 7907 ASSERT(mutex_owned(&iport->lock)); 7908 ASSERT(phyp); 7909 ASSERT(!list_link_active(&phyp->list_node)); 7910 7911 iport->nphy++; 7912 list_insert_tail(&iport->phys, phyp); 7913 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7914 &iport->nphy); 7915 mutex_enter(&phyp->phy_lock); 7916 pmcs_create_one_phy_stats(iport, phyp); 7917 mutex_exit(&phyp->phy_lock); 7918 mutex_enter(&iport->refcnt_lock); 7919 iport->refcnt++; 7920 mutex_exit(&iport->refcnt_lock); 7921 } 7922 7923 /* 7924 * Called with the iport lock held 7925 */ 7926 void 7927 pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp) 7928 { 7929 pmcs_phy_t *pptr, *next_pptr; 7930 7931 ASSERT(mutex_owned(&iport->lock)); 7932 7933 /* 7934 * If phyp is NULL, remove all PHYs from the iport 7935 */ 7936 if (phyp == NULL) { 7937 for (pptr = list_head(&iport->phys); pptr != NULL; 7938 pptr = next_pptr) { 7939 next_pptr = list_next(&iport->phys, pptr); 7940 mutex_enter(&pptr->phy_lock); 7941 if (pptr->phy_stats != NULL) { 7942 kstat_delete(pptr->phy_stats); 7943 pptr->phy_stats = NULL; 7944 } 7945 pptr->iport = NULL; 7946 pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp, 7947 pptr->tgt_port_pm_tmp, B_FALSE); 7948 mutex_exit(&pptr->phy_lock); 7949 pmcs_rele_iport(iport); 7950 list_remove(&iport->phys, pptr); 7951 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, 7952 PMCS_NUM_PHYS, &iport->nphy); 7953 } 7954 
iport->nphy = 0; 7955 return; 7956 } 7957 7958 ASSERT(phyp); 7959 ASSERT(iport->nphy > 0); 7960 ASSERT(list_link_active(&phyp->list_node)); 7961 iport->nphy--; 7962 list_remove(&iport->phys, phyp); 7963 pmcs_update_phy_pm_props(phyp, phyp->att_port_pm_tmp, 7964 phyp->tgt_port_pm_tmp, B_FALSE); 7965 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS, 7966 &iport->nphy); 7967 pmcs_rele_iport(iport); 7968 } 7969 7970 /* 7971 * This function checks to see if the target pointed to by phyp is still 7972 * correct. This is done by comparing the target's unit address with the 7973 * SAS address in phyp. 7974 * 7975 * Called with PHY locked and target statlock held 7976 */ 7977 static boolean_t 7978 pmcs_phy_target_match(pmcs_phy_t *phyp) 7979 { 7980 uint64_t wwn; 7981 char unit_address[PMCS_MAX_UA_SIZE]; 7982 boolean_t rval = B_FALSE; 7983 7984 ASSERT(phyp); 7985 ASSERT(phyp->target); 7986 ASSERT(mutex_owned(&phyp->phy_lock)); 7987 ASSERT(mutex_owned(&phyp->target->statlock)); 7988 7989 wwn = pmcs_barray2wwn(phyp->sas_address); 7990 (void) scsi_wwn_to_wwnstr(wwn, 1, unit_address); 7991 7992 if (memcmp((void *)unit_address, (void *)phyp->target->unit_address, 7993 strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) { 7994 rval = B_TRUE; 7995 } 7996 7997 return (rval); 7998 } 7999 /* 8000 * Commands used to serialize SMP requests. 8001 * 8002 * The SPC only allows 2 SMP commands per SMP target: 1 cmd pending and 1 cmd 8003 * queued for the same SMP target. If a third SMP cmd is sent to the SPC for an 8004 * SMP target that already has a SMP cmd pending and one queued, then the 8005 * SPC responds with the ERROR_INTERNAL_SMP_RESOURCE response. 8006 * 8007 * Additionally, the SPC has an 8 entry deep cmd queue and the number of SMP 8008 * cmds that can be queued is controlled by the PORT_CONTROL IOMB. The 8009 * SPC default is 1 SMP command/port (iport). These 2 queued SMP cmds would 8010 * have to be for different SMP targets. 
 * The INTERNAL_SMP_RESOURCE error will
 * also be returned if a 2nd SMP cmd is sent to the controller when there is
 * already 1 SMP cmd queued for that port or if a 3rd SMP cmd is sent to the
 * queue if there are already 2 queued SMP cmds.
 */
void
pmcs_smp_acquire(pmcs_iport_t *iport)
{
	/* No iport (e.g. during early attach/teardown): nothing to hold */
	if (iport == NULL) {
		return;
	}

	mutex_enter(&iport->smp_lock);
	/* Block until the current holder signals smp_cv on release */
	while (iport->smp_active) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "%s: SMP is active on thread 0x%p, waiting", __func__,
		    (void *)iport->smp_active_thread);
		cv_wait(&iport->smp_cv, &iport->smp_lock);
	}
	/* We own SMP on this iport now; record the owning thread for debug */
	iport->smp_active = B_TRUE;
	iport->smp_active_thread = curthread;
	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: SMP acquired by thread 0x%p", __func__,
	    (void *)iport->smp_active_thread);
	mutex_exit(&iport->smp_lock);
}

/*
 * Release the per-iport SMP serialization taken by pmcs_smp_acquire and
 * wake one waiter, if any.
 */
void
pmcs_smp_release(pmcs_iport_t *iport)
{
	if (iport == NULL) {
		return;
	}

	mutex_enter(&iport->smp_lock);
	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: SMP released by thread 0x%p", __func__, (void *)curthread);
	iport->smp_active = B_FALSE;
	iport->smp_active_thread = NULL;
	cv_signal(&iport->smp_cv);
	mutex_exit(&iport->smp_lock);
}

/*
 * Update a PHY's attached-port-pm and target-port-pm properties
 *
 * phyp: PHY whose properties are to be updated
 *
 * att_bv: Bit value of the attached-port-pm property to be updated in the
 * 64-bit holding area for the PHY.
 *
 * tgt_bv: Bit value of the target-port-pm property to update in the 64-bit
 * holding area for the PHY.
 *
 * prop_add_val: If TRUE, we're adding bits into the property value.
 * Otherwise, we're taking them out. Either way, the properties for this
 * PHY will be updated.
8067 */ 8068 void 8069 pmcs_update_phy_pm_props(pmcs_phy_t *phyp, uint64_t att_bv, uint64_t tgt_bv, 8070 boolean_t prop_add_val) 8071 { 8072 if (prop_add_val) { 8073 /* 8074 * If the values are currently 0, then we're setting the 8075 * phymask for just this PHY as well. 8076 */ 8077 if (phyp->att_port_pm_tmp == 0) { 8078 phyp->att_port_pm = att_bv; 8079 phyp->tgt_port_pm = tgt_bv; 8080 } 8081 phyp->att_port_pm_tmp |= att_bv; 8082 phyp->tgt_port_pm_tmp |= tgt_bv; 8083 (void) snprintf(phyp->att_port_pm_str, PMCS_PM_MAX_NAMELEN, 8084 "%"PRIx64, phyp->att_port_pm_tmp); 8085 (void) snprintf(phyp->tgt_port_pm_str, PMCS_PM_MAX_NAMELEN, 8086 "%"PRIx64, phyp->tgt_port_pm_tmp); 8087 } else { 8088 phyp->att_port_pm_tmp &= ~att_bv; 8089 phyp->tgt_port_pm_tmp &= ~tgt_bv; 8090 if (phyp->att_port_pm_tmp) { 8091 (void) snprintf(phyp->att_port_pm_str, 8092 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8093 phyp->att_port_pm_tmp); 8094 } else { 8095 phyp->att_port_pm_str[0] = '\0'; 8096 phyp->att_port_pm = 0; 8097 } 8098 if (phyp->tgt_port_pm_tmp) { 8099 (void) snprintf(phyp->tgt_port_pm_str, 8100 PMCS_PM_MAX_NAMELEN, "%"PRIx64, 8101 phyp->tgt_port_pm_tmp); 8102 } else { 8103 phyp->tgt_port_pm_str[0] = '\0'; 8104 phyp->tgt_port_pm = 0; 8105 } 8106 } 8107 8108 if (phyp->target == NULL) { 8109 return; 8110 } 8111 8112 mutex_enter(&phyp->target->statlock); 8113 if (!list_is_empty(&phyp->target->lun_list)) { 8114 pmcs_lun_t *lunp; 8115 8116 lunp = list_head(&phyp->target->lun_list); 8117 while (lunp) { 8118 (void) scsi_device_prop_update_string(lunp->sd, 8119 SCSI_DEVICE_PROP_PATH, 8120 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8121 phyp->att_port_pm_str); 8122 (void) scsi_device_prop_update_string(lunp->sd, 8123 SCSI_DEVICE_PROP_PATH, 8124 SCSI_ADDR_PROP_TARGET_PORT_PM, 8125 phyp->tgt_port_pm_str); 8126 lunp = list_next(&phyp->target->lun_list, lunp); 8127 } 8128 } else if (phyp->target->smpd) { 8129 (void) smp_device_prop_update_string(phyp->target->smpd, 8130 SCSI_ADDR_PROP_ATTACHED_PORT_PM, 8131 
phyp->att_port_pm_str); 8132 (void) smp_device_prop_update_string(phyp->target->smpd, 8133 SCSI_ADDR_PROP_TARGET_PORT_PM, 8134 phyp->tgt_port_pm_str); 8135 } 8136 mutex_exit(&phyp->target->statlock); 8137 } 8138 8139 /* ARGSUSED */ 8140 void 8141 pmcs_deregister_device_work(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8142 { 8143 pmcs_phy_t *pptr; 8144 8145 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) { 8146 pmcs_lock_phy(pptr); 8147 if (pptr->deregister_wait) { 8148 pmcs_deregister_device(pwp, pptr); 8149 } 8150 pmcs_unlock_phy(pptr); 8151 } 8152 } 8153 8154 /* 8155 * pmcs_iport_active 8156 * 8157 * Mark this iport as active. Called with the iport lock held. 8158 */ 8159 static void 8160 pmcs_iport_active(pmcs_iport_t *iport) 8161 { 8162 ASSERT(mutex_owned(&iport->lock)); 8163 8164 iport->ua_state = UA_ACTIVE; 8165 iport->smp_active = B_FALSE; 8166 iport->smp_active_thread = NULL; 8167 } 8168 8169 /* ARGSUSED */ 8170 static void 8171 pmcs_tgtmap_activate_cb(void *tgtmap_priv, char *tgt_addr, 8172 scsi_tgtmap_tgt_type_t tgt_type, void **tgt_privp) 8173 { 8174 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8175 pmcs_hw_t *pwp = iport->pwp; 8176 pmcs_xscsi_t *target; 8177 8178 /* 8179 * Look up the target. If there is one, and it doesn't have a PHY 8180 * pointer, re-establish that linkage here. 8181 */ 8182 mutex_enter(&pwp->lock); 8183 target = pmcs_get_target(iport, tgt_addr, B_FALSE); 8184 mutex_exit(&pwp->lock); 8185 8186 /* 8187 * If we got a target, it will now have a PHY pointer and the PHY 8188 * will point to the target. The PHY will be locked, so we'll need 8189 * to unlock it. 8190 */ 8191 if (target) { 8192 pmcs_unlock_phy(target->phy); 8193 } 8194 8195 /* 8196 * Update config_restart_time so we don't try to restart discovery 8197 * while enumeration is still in progress. 
8198 */ 8199 mutex_enter(&pwp->config_lock); 8200 pwp->config_restart_time = ddi_get_lbolt() + 8201 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8202 mutex_exit(&pwp->config_lock); 8203 } 8204 8205 /* ARGSUSED */ 8206 static boolean_t 8207 pmcs_tgtmap_deactivate_cb(void *tgtmap_priv, char *tgt_addr, 8208 scsi_tgtmap_tgt_type_t tgt_type, void *tgt_priv, 8209 scsi_tgtmap_deact_rsn_t tgt_deact_rsn) 8210 { 8211 pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv; 8212 pmcs_phy_t *phyp; 8213 boolean_t rediscover = B_FALSE; 8214 8215 ASSERT(iport); 8216 8217 phyp = pmcs_find_phy_by_sas_address(iport->pwp, iport, NULL, tgt_addr); 8218 if (phyp == NULL) { 8219 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 8220 "%s: Couldn't find PHY at %s", __func__, tgt_addr); 8221 return (rediscover); 8222 } 8223 /* phyp is locked */ 8224 8225 if (!phyp->reenumerate && phyp->configured) { 8226 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, phyp->target, 8227 "%s: PHY @ %s is configured... re-enumerate", __func__, 8228 tgt_addr); 8229 phyp->reenumerate = 1; 8230 } 8231 8232 /* 8233 * Check to see if reenumerate is set, and if so, if we've reached our 8234 * maximum number of retries. 
8235 */ 8236 if (phyp->reenumerate) { 8237 if (phyp->enum_attempts == PMCS_MAX_REENUMERATE) { 8238 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8239 phyp->target, 8240 "%s: No more enumeration attempts for %s", __func__, 8241 tgt_addr); 8242 } else { 8243 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, 8244 phyp->target, "%s: Re-attempt enumeration for %s", 8245 __func__, tgt_addr); 8246 ++phyp->enum_attempts; 8247 rediscover = B_TRUE; 8248 } 8249 8250 phyp->reenumerate = 0; 8251 } 8252 8253 pmcs_unlock_phy(phyp); 8254 8255 mutex_enter(&iport->pwp->config_lock); 8256 iport->pwp->config_restart_time = ddi_get_lbolt() + 8257 drv_usectohz(PMCS_REDISCOVERY_DELAY); 8258 if (rediscover) { 8259 iport->pwp->config_restart = B_TRUE; 8260 } else if (iport->pwp->config_restart == B_TRUE) { 8261 /* 8262 * If we aren't asking for rediscovery because of this PHY, 8263 * check to see if we're already asking for it on behalf of 8264 * some other PHY. If so, we'll want to return TRUE, so reset 8265 * "rediscover" here. 8266 */ 8267 rediscover = B_TRUE; 8268 } 8269 8270 mutex_exit(&iport->pwp->config_lock); 8271 8272 return (rediscover); 8273 } 8274 8275 void 8276 pmcs_status_disposition(pmcs_phy_t *phyp, uint32_t status) 8277 { 8278 ASSERT(phyp); 8279 ASSERT(!mutex_owned(&phyp->phy_lock)); 8280 8281 if (phyp == NULL) { 8282 return; 8283 } 8284 8285 pmcs_lock_phy(phyp); 8286 8287 /* 8288 * XXX: Do we need to call this function from an SSP_EVENT? 
8289 */ 8290 8291 switch (status) { 8292 case PMCOUT_STATUS_NO_DEVICE: 8293 case PMCOUT_STATUS_ERROR_HW_TIMEOUT: 8294 case PMCOUT_STATUS_XFER_ERR_BREAK: 8295 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY: 8296 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED: 8297 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION: 8298 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK: 8299 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION: 8300 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 8301 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 8302 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION: 8303 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR: 8304 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED: 8305 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME: 8306 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT: 8307 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE: 8308 case PMCOUT_STATUS_IO_PORT_IN_RESET: 8309 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 8310 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 8311 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 8312 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8313 "%s: status = 0x%x for " SAS_ADDR_FMT ", reenumerate", 8314 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8315 phyp->reenumerate = 1; 8316 break; 8317 8318 default: 8319 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target, 8320 "%s: status = 0x%x for " SAS_ADDR_FMT ", no reenumeration", 8321 __func__, status, SAS_ADDR_PRT(phyp->sas_address)); 8322 break; 8323 } 8324 8325 pmcs_unlock_phy(phyp); 8326 } 8327 8328 /* 8329 * Add the list of PHYs pointed to by phyp to the dead_phys_list 8330 * 8331 * Called with all PHYs in the list locked 8332 */ 8333 static void 8334 pmcs_add_dead_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp) 8335 { 8336 mutex_enter(&pwp->dead_phylist_lock); 8337 while (phyp) { 8338 pmcs_phy_t *nxt = phyp->sibling; 8339 ASSERT(phyp->dead); 8340 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL, 8341 "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__, 8342 
(void *)phyp, phyp->path, phyp->ref_count); 8343 /* 8344 * Put this PHY on the dead PHY list for the watchdog to 8345 * clean up after any outstanding work has completed. 8346 */ 8347 phyp->dead_next = pwp->dead_phys; 8348 pwp->dead_phys = phyp; 8349 pmcs_unlock_phy(phyp); 8350 phyp = nxt; 8351 } 8352 mutex_exit(&pwp->dead_phylist_lock); 8353 } 8354 8355 static void 8356 pmcs_get_fw_version(pmcs_hw_t *pwp) 8357 { 8358 uint32_t ila_len, ver_hi, ver_lo; 8359 uint8_t ila_ver_string[9], img_flag; 8360 char uc, *ucp = &uc; 8361 unsigned long ila_ver; 8362 uint64_t ver_hilo; 8363 8364 /* Firmware version is easy. */ 8365 pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW); 8366 8367 /* 8368 * Get the image size (2nd to last dword) 8369 * NOTE: The GSM registers are mapped little-endian, but the data 8370 * on the flash is actually big-endian, so we need to swap these values 8371 * regardless of which platform we're on. 8372 */ 8373 ila_len = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8374 GSM_FLASH_BASE + GSM_SM_BLKSZ - (2 << 2))); 8375 if (ila_len > 65535) { 8376 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 8377 "%s: Invalid ILA image size (0x%x)?", __func__, ila_len); 8378 return; 8379 } 8380 8381 /* 8382 * The numeric version is at ila_len - PMCS_ILA_VER_OFFSET 8383 */ 8384 ver_hi = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8385 GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET)); 8386 ver_lo = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8387 GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET + 4)); 8388 ver_hilo = BE_64(((uint64_t)ver_hi << 32) | ver_lo); 8389 bcopy((const void *)&ver_hilo, &ila_ver_string[0], 8); 8390 ila_ver_string[8] = '\0'; 8391 8392 (void) ddi_strtoul((const char *)ila_ver_string, &ucp, 16, &ila_ver); 8393 pwp->ila_ver = (int)(ila_ver & 0xffffffff); 8394 8395 img_flag = (BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER, 8396 GSM_FLASH_IMG_FLAGS)) & 0xff000000) >> 24; 8397 if (img_flag & PMCS_IMG_FLAG_A) { 8398 pwp->fw_active_img = 1; 
8399 } else { 8400 pwp->fw_active_img = 0; 8401 } 8402 } 8403