xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c (revision a25672a1f5bcd6aa4bbce28adab51d84ae202323)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  *
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file contains various support routines.
28  */
29 
30 #include <sys/scsi/adapters/pmcs/pmcs.h>
31 
32 /*
33  * Local static data
34  */
35 static int tgtmap_usec = MICROSEC;
36 
37 /*
38  * SAS Topology Configuration
39  */
40 static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *);
41 static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *);
42 
43 static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *);
44 static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *);
45 static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int);
46 
47 static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *);
48 static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *,
49     pmcs_phy_t *);
50 
51 static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *);
52 static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t);
53 static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *);
54 static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *);
55 static void pmcs_begin_observations(pmcs_hw_t *);
56 static boolean_t pmcs_report_observations(pmcs_hw_t *);
57 static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *,
58     pmcs_phy_t *);
59 static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *);
60 static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *);
61 static void pmcs_lock_phy_impl(pmcs_phy_t *, int);
62 static void pmcs_unlock_phy_impl(pmcs_phy_t *, int);
63 static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *);
64 static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *);
65 static void pmcs_reap_dead_phy(pmcs_phy_t *);
66 static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *);
67 static boolean_t pmcs_phy_target_match(pmcs_phy_t *);
68 static void pmcs_iport_active(pmcs_iport_t *);
69 static void pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
70     void **);
71 static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *,
72     scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
73 static void pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *);
74 
75 /*
76  * Often used strings
77  */
78 const char pmcs_nowrk[] = "%s: unable to get work structure";
79 const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry";
80 const char pmcs_timeo[] = "%s: command timed out";
81 
82 extern const ddi_dma_attr_t pmcs_dattr;
83 
84 /*
85  * Some Initial setup steps.
86  */
87 
88 int
89 pmcs_setup(pmcs_hw_t *pwp)
90 {
91 	uint32_t barval = pwp->mpibar;
92 	uint32_t i, scratch, regbar, regoff, barbar, baroff;
93 	uint32_t new_ioq_depth, ferr = 0;
94 
95 	/*
96 	 * Check current state. If we're not at READY state,
97 	 * we can't go further.
98 	 */
99 	scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
100 	if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) {
101 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
102 		    "%s: AAP Error State (0x%x)",
103 		    __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
104 		    PMCS_MSGU_AAP_ERROR_MASK);
105 		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
106 		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
107 		return (-1);
108 	}
109 	if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
110 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
111 		    "%s: AAP unit not ready (state 0x%x)",
112 		    __func__, scratch & PMCS_MSGU_AAP_STATE_MASK);
113 		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
114 		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
115 		return (-1);
116 	}
117 
118 	/*
119 	 * Read the offset from the Message Unit scratchpad 0 register.
120 	 * This allows us to read the MPI Configuration table.
121 	 *
122 	 * Check its signature for validity.
123 	 */
124 	baroff = barval;
125 	barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT;
126 	baroff &= PMCS_MSGU_MPI_OFFSET_MASK;
127 
128 	regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0);
129 	regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT;
130 	regoff &= PMCS_MSGU_MPI_OFFSET_MASK;
131 
132 	if (regoff > baroff) {
133 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
134 		    "%s: bad MPI Table Length (register offset=0x%08x, "
135 		    "passed offset=0x%08x)", __func__, regoff, baroff);
136 		return (-1);
137 	}
138 	if (regbar != barbar) {
139 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
140 		    "%s: bad MPI BAR (register BAROFF=0x%08x, "
141 		    "passed BAROFF=0x%08x)", __func__, regbar, barbar);
142 		return (-1);
143 	}
144 	pwp->mpi_offset = regoff;
145 	if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) {
146 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
147 		    "%s: Bad MPI Configuration Table Signature 0x%x", __func__,
148 		    pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS));
149 		return (-1);
150 	}
151 
152 	if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) {
153 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
154 		    "%s: Bad MPI Configuration Revision 0x%x", __func__,
155 		    pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR));
156 		return (-1);
157 	}
158 
159 	/*
160 	 * Generate offsets for the General System, Inbound Queue Configuration
161 	 * and Outbound Queue configuration tables. This way the macros to
162 	 * access those tables will work correctly.
163 	 */
164 	pwp->mpi_gst_offset =
165 	    pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO);
166 	pwp->mpi_iqc_offset =
167 	    pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO);
168 	pwp->mpi_oqc_offset =
169 	    pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO);
170 
171 	pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW);
172 
173 	pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO);
174 	pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16;
175 
176 	pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
177 	pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
178 	pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
179 	if (pwp->max_iq <= PMCS_NIQ) {
180 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
181 		    "%s: not enough Inbound Queues supported "
182 		    "(need %d, max_oq=%d)", __func__, pwp->max_iq, PMCS_NIQ);
183 		return (-1);
184 	}
185 	if (pwp->max_oq <= PMCS_NOQ) {
186 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
187 		    "%s: not enough Outbound Queues supported "
188 		    "(need %d, max_oq=%d)", __func__, pwp->max_oq, PMCS_NOQ);
189 		return (-1);
190 	}
191 	if (pwp->nphy == 0) {
192 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
193 		    "%s: zero phys reported", __func__);
194 		return (-1);
195 	}
196 	if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) {
197 		pwp->hipri_queue = (1 << PMCS_IQ_OTHER);
198 	}
199 
200 
201 	for (i = 0; i < pwp->nphy; i++) {
202 		PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i);
203 		PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i);
204 	}
205 
206 	pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2,
207 	    (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) |
208 	    (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT));
209 
210 	/*
211 	 * Verify that ioq_depth is valid (> 0 and not so high that it
212 	 * would cause us to overrun the chip with commands).
213 	 */
214 	if (pwp->ioq_depth == 0) {
215 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
216 		    "%s: I/O queue depth set to 0. Setting to %d",
217 		    __func__, PMCS_NQENTRY);
218 		pwp->ioq_depth = PMCS_NQENTRY;
219 	}
220 
221 	if (pwp->ioq_depth < PMCS_MIN_NQENTRY) {
222 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
223 		    "%s: I/O queue depth set too low (%d). Setting to %d",
224 		    __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY);
225 		pwp->ioq_depth = PMCS_MIN_NQENTRY;
226 	}
227 
228 	if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) {
229 		new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1);
230 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
231 		    "%s: I/O queue depth set too high (%d). Setting to %d",
232 		    __func__, pwp->ioq_depth, new_ioq_depth);
233 		pwp->ioq_depth = new_ioq_depth;
234 	}
235 
236 	/*
237 	 * Allocate consistent memory for OQs and IQs.
238 	 */
239 	pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr;
240 	pwp->iqp_dma_attr.dma_attr_align =
241 	    pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE;
242 
243 	/*
244 	 * The Rev C chip has the ability to do PIO to or from consistent
245 	 * memory anywhere in a 64 bit address space, but the firmware is
246 	 * not presently set up to do so.
247 	 */
248 	pwp->iqp_dma_attr.dma_attr_addr_hi =
249 	    pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull;
250 
251 	for (i = 0; i < PMCS_NIQ; i++) {
252 		if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr,
253 		    &pwp->iqp_acchdls[i],
254 		    &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
255 		    (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) {
256 			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
257 			    "Failed to setup DMA for iqp[%d]", i);
258 			return (-1);
259 		}
260 		bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
261 	}
262 
263 	for (i = 0; i < PMCS_NOQ; i++) {
264 		if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr,
265 		    &pwp->oqp_acchdls[i],
266 		    &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
267 		    (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) {
268 			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
269 			    "Failed to setup DMA for oqp[%d]", i);
270 			return (-1);
271 		}
272 		bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
273 	}
274 
275 	/*
276 	 * Install the IQ and OQ addresses (and null out the rest).
277 	 */
278 	for (i = 0; i < pwp->max_iq; i++) {
279 		pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i));
280 		if (i < PMCS_NIQ) {
281 			if (i != PMCS_IQ_OTHER) {
282 				pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
283 				    pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16));
284 			} else {
285 				pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
286 				    (1 << 30) | pwp->ioq_depth |
287 				    (PMCS_QENTRY_SIZE << 16));
288 			}
289 			pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i),
290 			    DWORD1(pwp->iqaddr[i]));
291 			pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i),
292 			    DWORD0(pwp->iqaddr[i]));
293 			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i),
294 			    DWORD1(pwp->ciaddr+IQ_OFFSET(i)));
295 			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i),
296 			    DWORD0(pwp->ciaddr+IQ_OFFSET(i)));
297 		} else {
298 			pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
299 			pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
300 			pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
301 			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
302 			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
303 		}
304 	}
305 
306 	for (i = 0; i < pwp->max_oq; i++) {
307 		pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i));
308 		if (i < PMCS_NOQ) {
309 			pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth |
310 			    (PMCS_QENTRY_SIZE << 16) | OQIEX);
311 			pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i),
312 			    DWORD1(pwp->oqaddr[i]));
313 			pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i),
314 			    DWORD0(pwp->oqaddr[i]));
315 			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i),
316 			    DWORD1(pwp->ciaddr+OQ_OFFSET(i)));
317 			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i),
318 			    DWORD0(pwp->ciaddr+OQ_OFFSET(i)));
319 			pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i),
320 			    pwp->oqvec[i] << 24);
321 			pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
322 		} else {
323 			pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
324 			pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
325 			pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
326 			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
327 			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
328 			pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
329 			pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
330 		}
331 	}
332 
333 	/*
334 	 * Set up logging, if defined.
335 	 */
336 	if (pwp->fwlog) {
337 		uint64_t logdma = pwp->fwaddr;
338 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma));
339 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma));
340 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1);
341 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog);
342 		logdma += (PMCS_FWLOG_SIZE >> 1);
343 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma));
344 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma));
345 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1);
346 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog);
347 	}
348 
349 	/*
350 	 * Interrupt vectors, outbound queues, and odb_auto_clear
351 	 *
352 	 * MSI/MSI-X:
353 	 * If we got 4 interrupt vectors, we'll assign one to each outbound
354 	 * queue as well as the fatal interrupt, and auto clear can be set
355 	 * for each.
356 	 *
357 	 * If we only got 2 vectors, one will be used for I/O completions
358 	 * and the other for the other two vectors.  In this case, auto_
359 	 * clear can only be set for I/Os, which is fine.  The fatal
360 	 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which
361 	 * is not an interrupt vector.
362 	 *
363 	 * MSI/MSI-X/INT-X:
364 	 * If we only got 1 interrupt vector, auto_clear must be set to 0,
365 	 * and again the fatal interrupt will be mapped to the
366 	 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector).
367 	 */
368 
369 	switch (pwp->int_type) {
370 	case PMCS_INT_MSIX:
371 	case PMCS_INT_MSI:
372 		switch (pwp->intr_cnt) {
373 		case 1:
374 			pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
375 			    (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
376 			pwp->odb_auto_clear = 0;
377 			break;
378 		case 2:
379 			pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
380 			    (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
381 			pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) |
382 			    (1 << PMCS_MSIX_IODONE);
383 			break;
384 		case 4:
385 			pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
386 			    (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT));
387 			pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) |
388 			    (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) |
389 			    (1 << PMCS_MSIX_EVENTS);
390 			break;
391 		}
392 		break;
393 
394 	case PMCS_INT_FIXED:
395 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR,
396 		    PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
397 		pwp->odb_auto_clear = 0;
398 		break;
399 	}
400 
401 	/*
402 	 * Enable Interrupt Reassertion
403 	 * Default Delay 1000us
404 	 */
405 	ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR);
406 	if ((ferr & PMCS_MPI_IRAE) == 0) {
407 		ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK);
408 		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE);
409 	}
410 
411 	pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear);
412 	pwp->mpi_table_setup = 1;
413 	return (0);
414 }
415 
416 /*
417  * Start the Message Passing protocol with the PMC chip.
418  */
419 int
420 pmcs_start_mpi(pmcs_hw_t *pwp)
421 {
422 	int i;
423 
424 	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI);
425 	for (i = 0; i < 1000; i++) {
426 		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
427 		    PMCS_MSGU_IBDB_MPIINI) == 0) {
428 			break;
429 		}
430 		drv_usecwait(1000);
431 	}
432 	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) {
433 		return (-1);
434 	}
435 	drv_usecwait(500000);
436 
437 	/*
438 	 * Check to make sure we got to INIT state.
439 	 */
440 	if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) !=
441 	    PMCS_MPI_STATE_INIT) {
442 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
443 		    "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__,
444 		    pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE),
445 		    pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
446 		return (-1);
447 	}
448 	return (0);
449 }
450 
451 /*
452  * Stop the Message Passing protocol with the PMC chip.
453  */
454 int
455 pmcs_stop_mpi(pmcs_hw_t *pwp)
456 {
457 	int i;
458 
459 	for (i = 0; i < pwp->max_iq; i++) {
460 		pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
461 		pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
462 		pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
463 		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
464 		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
465 	}
466 	for (i = 0; i < pwp->max_oq; i++) {
467 		pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
468 		pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
469 		pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
470 		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
471 		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
472 		pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
473 		pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
474 	}
475 	pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0);
476 	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU);
477 	for (i = 0; i < 2000; i++) {
478 		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
479 		    PMCS_MSGU_IBDB_MPICTU) == 0) {
480 			break;
481 		}
482 		drv_usecwait(1000);
483 	}
484 	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) {
485 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
486 		    "%s: MPI stop failed", __func__);
487 		return (-1);
488 	}
489 	return (0);
490 }
491 
492 /*
493  * Do a sequence of ECHO messages to test for MPI functionality,
494  * all inbound and outbound queue functionality and interrupts.
495  */
/*
 * Do a sequence of ECHO messages to test for MPI functionality,
 * all inbound and outbound queue functionality and interrupts.
 *
 * Sends max_cmd * 3 ECHO IOMBs, rotating through every inbound and
 * outbound queue so each is exercised at least twice, and times each
 * round trip.  The accumulated latency is used at the end to seed the
 * I/O interrupt coalescing parameters.  Returns 0 on success, -1 if a
 * work structure or queue entry cannot be obtained or an echo times out.
 */
int
pmcs_echo_test(pmcs_hw_t *pwp)
{
	echo_test_t fred;
	struct pmcwork *pwrk;
	uint32_t *msg, count;
	int iqe = 0, iqo = 0, result, rval = 0;
	int iterations;
	hrtime_t echo_start, echo_end, echo_total;

	ASSERT(pwp->max_cmd > 0);

	/*
	 * We want iterations to be max_cmd * 3 to ensure that we run the
	 * echo test enough times to iterate through every inbound queue
	 * at least twice.
	 */
	iterations = pwp->max_cmd * 3;

	echo_total = 0;
	count = 0;

	while (count < iterations) {
		/* One work structure per in-flight echo; we wait on it. */
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nowrk, __func__);
			rval = -1;
			break;
		}

		/* Queue lock must be held from GET_IQ_ENTRY to INC_IQ_ENTRY */
		mutex_enter(&pwp->iqp_lock[iqe]);
		msg = GET_IQ_ENTRY(pwp, iqe);
		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[iqe]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nomsg, __func__);
			rval = -1;
			break;
		}

		bzero(msg, PMCS_QENTRY_SIZE);

		if (iqe == PMCS_IQ_OTHER) {
			/* This is on the high priority queue */
			msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO));
		} else {
			msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO));
		}
		msg[1] = LE_32(pwrk->htag);
		/* Payload is arbitrary; the firmware echoes it back. */
		fred.signature = 0xdeadbeef;
		fred.count = count;
		fred.ptr = &count;
		(void) memcpy(&msg[2], &fred, sizeof (fred));
		pwrk->state = PMCS_WORK_STATE_ONCHIP;

		INC_IQ_ENTRY(pwp, iqe);

		echo_start = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__start,
		    hrtime_t, echo_start, uint32_t, pwrk->htag);

		/* Rotate to the next inbound and outbound queue. */
		if (++iqe == PMCS_NIQ) {
			iqe = 0;
		}
		if (++iqo == PMCS_NOQ) {
			iqo = 0;
		}

		/* Block (up to 250ms) for the echo completion. */
		WAIT_FOR(pwrk, 250, result);

		echo_end = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__end,
		    hrtime_t, echo_end, int, result);

		echo_total += (echo_end - echo_start);

		pmcs_pwork(pwp, pwrk);
		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: command timed out on echo test #%d",
			    __func__, count);
			rval = -1;
			break;
		}
	}

	/*
	 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to
	 * remove the overhead of things like the delay in getting signaled
	 * for completion.
	 */
	if (echo_total != 0) {
		pwp->io_intr_coal.intr_latency =
		    (echo_total / iterations) / 2;
		pwp->io_intr_coal.intr_threshold =
		    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
		    pwp->io_intr_coal.intr_latency);
	}

	return (rval);
}
599 
600 /*
601  * Start the (real) phys
602  */
603 int
604 pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed)
605 {
606 	int result;
607 	uint32_t *msg;
608 	struct pmcwork *pwrk;
609 	pmcs_phy_t *pptr;
610 	sas_identify_af_t sap;
611 
612 	mutex_enter(&pwp->lock);
613 	pptr = pwp->root_phys + phynum;
614 	if (pptr == NULL) {
615 		mutex_exit(&pwp->lock);
616 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
617 		    "%s: cannot find port %d", __func__, phynum);
618 		return (0);
619 	}
620 
621 	pmcs_lock_phy(pptr);
622 	mutex_exit(&pwp->lock);
623 
624 	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
625 	if (pwrk == NULL) {
626 		pmcs_unlock_phy(pptr);
627 		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
628 		return (-1);
629 	}
630 
631 	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
632 	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
633 
634 	if (msg == NULL) {
635 		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
636 		pmcs_unlock_phy(pptr);
637 		pmcs_pwork(pwp, pwrk);
638 		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
639 		return (-1);
640 	}
641 	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START));
642 	msg[1] = LE_32(pwrk->htag);
643 	msg[2] = LE_32(linkmode | speed | phynum);
644 	bzero(&sap, sizeof (sap));
645 	sap.device_type = SAS_IF_DTYPE_ENDPOINT;
646 	sap.ssp_ini_port = 1;
647 
648 	if (pwp->separate_ports) {
649 		pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address);
650 	} else {
651 		pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address);
652 	}
653 
654 	ASSERT(phynum < SAS2_PHYNUM_MAX);
655 	sap.phy_identifier = phynum & SAS2_PHYNUM_MASK;
656 	(void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t));
657 	pwrk->state = PMCS_WORK_STATE_ONCHIP;
658 	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
659 
660 	pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1);
661 	pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1);
662 	pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE;
663 	pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE;
664 
665 	pmcs_unlock_phy(pptr);
666 	WAIT_FOR(pwrk, 1000, result);
667 	pmcs_pwork(pwp, pwrk);
668 
669 	if (result) {
670 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
671 	} else {
672 		mutex_enter(&pwp->lock);
673 		pwp->phys_started |= (1 << phynum);
674 		mutex_exit(&pwp->lock);
675 	}
676 
677 	return (0);
678 }
679 
680 int
681 pmcs_start_phys(pmcs_hw_t *pwp)
682 {
683 	int i;
684 
685 	for (i = 0; i < pwp->nphy; i++) {
686 		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
687 			if (pmcs_start_phy(pwp, i,
688 			    (pwp->phymode << PHY_MODE_SHIFT),
689 			    pwp->physpeed << PHY_LINK_SHIFT)) {
690 				return (-1);
691 			}
692 			if (pmcs_clear_diag_counters(pwp, i)) {
693 				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
694 				    "%s: failed to reset counters on PHY (%d)",
695 				    __func__, i);
696 			}
697 		}
698 	}
699 	return (0);
700 }
701 
702 /*
703  * Called with PHY locked
704  */
/*
 * Reset a phy: called with PHY locked.
 *
 * For a non-root phy (level > 0) this sends an SMP PHY CONTROL request
 * (hard or link reset, per 'type') through the phy's parent expander;
 * for a root phy (level == 0) it uses the LOCAL_PHY_CONTROL IOMB.
 * Resetting an HBA-level expander phy is refused (returns 0).
 *
 * Returns 0 on success, ENOMEM if a work structure or queue entry
 * cannot be obtained, EIO on timeout (after attempting an SMP abort),
 * or the non-OK firmware status value.
 *
 * NOTE: the phy lock is dropped across the wait and reacquired before
 * return; SMP traffic is serialized on the iport for the duration.
 */
int
pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t type)
{
	uint32_t *msg;
	/* Local copy of the outbound response (pwrk->arg points here). */
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	const char *mbar;
	uint32_t amt;
	uint32_t pdevid;
	uint32_t stsoff;
	uint32_t status;
	int result, level, phynum;
	struct pmcwork *pwrk;
	uint32_t htag;

	ASSERT(mutex_owned(&pptr->phy_lock));

	bzero(iomb, PMCS_QENTRY_SIZE);
	phynum = pptr->phynum;
	level = pptr->level;
	if (level > 0) {
		/* Reset goes through the parent expander's device handle. */
		pdevid = pptr->parent->device_id;
	} else if ((level == 0) && (pptr->dtype == EXPANDER)) {
		/* Refuse to reset an expander attached at the HBA level. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Not resetting HBA PHY @ %s", __func__, pptr->path);
		return (0);
	}

	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		return (0);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	/* Completion handler copies the response into our iomb[]. */
	pwrk->arg = iomb;

	/*
	 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
	 * function to do either a link reset or hard reset.  If level == 0,
	 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
	 * root (local) PHY
	 */
	if (level) {
		stsoff = 2;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SMP_REQUEST));
		iomb[1] = LE_32(pwrk->htag);
		iomb[2] = LE_32(pdevid);
		iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
		/*
		 * Send SMP PHY CONTROL/HARD or LINK RESET
		 * (big-endian SMP frame header embedded in the IOMB).
		 */
		iomb[4] = BE_32(0x40910000);
		iomb[5] = 0;

		if (type == PMCS_PHYOP_HARD_RESET) {
			mbar = "SMP PHY CONTROL/HARD RESET";
			iomb[6] = BE_32((phynum << 24) |
			    (PMCS_PHYOP_HARD_RESET << 16));
		} else {
			mbar = "SMP PHY CONTROL/LINK RESET";
			iomb[6] = BE_32((phynum << 24) |
			    (PMCS_PHYOP_LINK_RESET << 16));
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s for phy 0x%x",
		    __func__, mbar, pptr->parent->path, pptr->phynum);
		amt = 7;
	} else {
		/*
		 * Unlike most other Outbound messages, status for
		 * a local phy operation is in DWORD 3.
		 */
		stsoff = 3;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_LOCAL_PHY_CONTROL));
		iomb[1] = LE_32(pwrk->htag);
		if (type == PMCS_PHYOP_LINK_RESET) {
			mbar = "LOCAL PHY LINK RESET";
			iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
		} else {
			mbar = "LOCAL PHY HARD RESET";
			iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s", __func__, mbar, pptr->path);
		amt = 3;
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	COPY_MESSAGE(msg, iomb, amt);
	/* Save the tag: pwrk is freed before we may need it for an abort. */
	htag = pwrk->htag;

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the phy lock while waiting (up to 1s) for completion. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);

		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP abort for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		return (EIO);
	}
	/* Status position differs between SMP (dword 2) and local (dword 3) */
	status = LE_32(iomb[stsoff]);

	if (status != PMCOUT_STATUS_OK) {
		char buf[32];
		const char *es =  pmcs_status_str(status);
		if (es == NULL) {
			(void) snprintf(buf, sizeof (buf), "Status 0x%x",
			    status);
			es = buf;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: %s action returned %s for %s", __func__, mbar, es,
		    pptr->path);
		return (status);
	}

	return (0);
}
856 
857 /*
858  * Stop the (real) phys.  No PHY or softstate locks are required as this only
859  * happens during detach.
860  */
861 void
862 pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
863 {
864 	int result;
865 	pmcs_phy_t *pptr;
866 	uint32_t *msg;
867 	struct pmcwork *pwrk;
868 
869 	pptr =  pwp->root_phys + phynum;
870 	if (pptr == NULL) {
871 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
872 		    "%s: unable to find port %d", __func__, phynum);
873 		return;
874 	}
875 
876 	if (pwp->phys_started & (1 << phynum)) {
877 		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
878 
879 		if (pwrk == NULL) {
880 			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
881 			    pmcs_nowrk, __func__);
882 			return;
883 		}
884 
885 		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
886 		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
887 
888 		if (msg == NULL) {
889 			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
890 			pmcs_pwork(pwp, pwrk);
891 			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
892 			    pmcs_nomsg, __func__);
893 			return;
894 		}
895 
896 		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
897 		msg[1] = LE_32(pwrk->htag);
898 		msg[2] = LE_32(phynum);
899 		pwrk->state = PMCS_WORK_STATE_ONCHIP;
900 		/*
901 		 * Make this unconfigured now.
902 		 */
903 		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
904 		WAIT_FOR(pwrk, 1000, result);
905 
906 		pmcs_pwork(pwp, pwrk);
907 		if (result) {
908 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
909 			    pptr, NULL, pmcs_timeo, __func__);
910 		}
911 
912 		pwp->phys_started &= ~(1 << phynum);
913 	}
914 
915 	pptr->configured = 0;
916 }
917 
918 /*
919  * No locks should be required as this is only called during detach
920  */
921 void
922 pmcs_stop_phys(pmcs_hw_t *pwp)
923 {
924 	int i;
925 	for (i = 0; i < pwp->nphy; i++) {
926 		if ((pwp->phyid_block_mask & (1 << i)) == 0) {
927 			pmcs_stop_phy(pwp, i);
928 		}
929 	}
930 }
931 
932 /*
933  * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
934  * 	ERR_CNT_RESET: return status of cmd
935  *	DIAG_REPORT_GET: return value of the counter
936  */
/*
 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
 * 	ERR_CNT_RESET: return status of cmd
 *	DIAG_REPORT_GET: return value of the counter
 *
 * Submits a PMCIN_SAS_DIAG_EXECUTE IOMB on the high-priority queue and
 * waits up to one second for completion.  Returns DDI_FAILURE when no
 * work structure / queue entry is available, on timeout, or (for
 * DIAG_REPORT_GET) when the firmware status is non-zero.
 *
 * NOTE(review): for ERR_CNT_RESET the raw firmware status is returned
 * directly and could in principle collide with DDI_FAILURE — callers
 * appear to treat any non-zero return as failure; confirm.
 */
int
pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc,
    uint8_t phynum)
{
	uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (DDI_FAILURE);
	}
	/* Completion handler copies the response into msg[]. */
	pwrk->arg = msg;
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE));
	msg[1] = LE_32(htag);
	msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) |
	    (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (DDI_FAILURE);
	}
	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Wait up to 1s for the firmware to complete the command. */
	WAIT_FOR(pwrk, 1000, result);

	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (DDI_FAILURE);
	}

	/* Firmware status is in DWORD 3 of the response. */
	status = LE_32(msg[3]);

	/* Return for counter reset */
	if (cmd == PMCS_ERR_CNT_RESET)
		return (status);

	/* Return for counter value */
	if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed, status (0x%x)", __func__, status);
		return (DDI_FAILURE);
	}
	return (LE_32(msg[4]));
}
992 
993 /* Get the current value of the counter for desc on phynum and return it. */
994 int
995 pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum)
996 {
997 	return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum));
998 }
999 
1000 /* Clear all of the counters for phynum. Returns the status of the command. */
1001 int
1002 pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
1003 {
1004 	uint32_t	cmd = PMCS_ERR_CNT_RESET;
1005 	uint32_t	cmd_desc;
1006 
1007 	cmd_desc = PMCS_INVALID_DWORD_CNT;
1008 	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1009 		return (DDI_FAILURE);
1010 
1011 	cmd_desc = PMCS_DISPARITY_ERR_CNT;
1012 	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1013 		return (DDI_FAILURE);
1014 
1015 	cmd_desc = PMCS_LOST_DWORD_SYNC_CNT;
1016 	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1017 		return (DDI_FAILURE);
1018 
1019 	cmd_desc = PMCS_RESET_FAILED_CNT;
1020 	if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1021 		return (DDI_FAILURE);
1022 
1023 	return (DDI_SUCCESS);
1024 }
1025 
1026 /*
1027  * Get firmware timestamp
1028  */
int
pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *ts)
{
	uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	/* Get a work structure we can wait on for the firmware response. */
	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (-1);
	}
	/* Completion copies the response IOMB into msg[] via pwrk->arg. */
	pwrk->arg = msg;
	htag = pwrk->htag;
	/* Build the 2-word GET_TIME_STAMP request. */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
	msg[1] = LE_32(pwrk->htag);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		/* No inbound queue space; release the work structure. */
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	COPY_MESSAGE(ptr, msg, 2);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Wait up to one second for the command to complete. */
	WAIT_FOR(pwrk, 1000, result);

	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (-1);
	}
	/* Reassemble the 64-bit timestamp from the two response words. */
	*ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32);
	return (0);
}
1069 
1070 /*
1071  * Dump all pertinent registers
1072  */
1073 
void
pmcs_register_dump(pmcs_hw_t *pwp)
{
	int i;
	uint32_t val;

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start",
	    ddi_get_instance(pwp->dip));
	/* Message unit doorbell and scratch registers. */
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
	/* Consumer/producer indices for each inbound and outbound queue. */
	for (i = 0; i < PMCS_NIQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u",
		    i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
	}
	for (i = 0; i < PMCS_NOQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u",
		    i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
	}
	/* General status table: MPI state, queue-freeze and error bits. */
	val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
	    val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
	    PMCS_HMI_ERR(val));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE MSGU TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
	/* Per-phy state: bit 0 = started, bit 1 = link up. */
	for (i = 0; i < pwp->nphy; i++) {
		uint32_t rerrf, pinfo, started = 0, link = 0;
		pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
		if (pinfo & 1) {
			started = 1;
			link = pinfo & 2;
		}
		rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
		    i, started, link, rerrf);
	}
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end",
	    ddi_get_instance(pwp->dip));
}
1131 
1132 /*
1133  * Handle SATA Abort and other error processing
1134  */
int
pmcs_abort_handler(pmcs_hw_t *pwp)
{
	pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
	pmcs_xscsi_t *tgt;
	int r, level = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__);

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys;
	mutex_exit(&pwp->lock);

	/*
	 * Depth-first walk of the PHY tree; pnext_uplevel[] saves the
	 * sibling to resume at when we back out of a child list.  Each
	 * PHY is locked only for the duration of its own processing.
	 */
	while (pptr) {
		/*
		 * XXX: Need to make sure this doesn't happen
		 * XXX: when non-NCQ commands are running.
		 */
		pmcs_lock_phy(pptr);
		if (pptr->need_rl_ext) {
			/* SATA NCQ error: read the log extension first. */
			ASSERT(pptr->dtype == SATA);
			if (pmcs_acquire_scratch(pwp, B_FALSE)) {
				/* Scratch busy; try again on a later pass. */
				goto next_phy;
			}
			r = pmcs_sata_abort_ncq(pwp, pptr);
			pmcs_release_scratch(pwp);
			if (r == ENOMEM) {
				goto next_phy;
			}
			if (r) {
				r = pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET);
				if (r == ENOMEM) {
					goto next_phy;
				}
				/* what if other failures happened? */
				pptr->abort_pending = 1;
				pptr->abort_sent = 0;
			}
		}
		/* Nothing to do unless an abort is pending and unsent. */
		if (pptr->abort_pending == 0 || pptr->abort_sent) {
			goto next_phy;
		}
		pptr->abort_pending = 0;
		if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
			/* Couldn't issue it now; re-arm for a later pass. */
			pptr->abort_pending = 1;
			goto next_phy;
		}
		pptr->abort_sent = 1;

		/*
		 * If the iport is no longer active, flush the queues
		 */
		if ((pptr->iport == NULL) ||
		    (pptr->iport->ua_state != UA_ACTIVE)) {
			tgt = pptr->target;
			if (tgt) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
				    "%s: Clearing target 0x%p, inactive iport",
				    __func__, (void *) tgt);
				mutex_enter(&tgt->statlock);
				pmcs_clear_xp(pwp, tgt);
				mutex_exit(&tgt->statlock);
			}
		}

next_phy:
		/* Advance: descend into children first, else next sibling. */
		if (pptr->children) {
			pnext = pptr->children;
			pnext_uplevel[level++] = pptr->sibling;
		} else {
			pnext = pptr->sibling;
			while ((pnext == NULL) && (level > 0)) {
				pnext = pnext_uplevel[--level];
			}
		}

		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (0);
}
1218 
1219 /*
1220  * Register a device (get a device handle for it).
1221  * Called with PHY lock held.
1222  */
int
pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	int result = 0;
	uint32_t *msg;
	uint32_t tmp, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	if (msg == NULL ||
	    (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		/* No queue entry or no work structure: resource shortage. */
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		result = ENOMEM;
		goto out;
	}

	/* Completion copies the response IOMB into iomb[] via pwrk->arg. */
	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;

	/* Build the REGISTER_DEVICE request directly in the queue entry. */
	msg[1] = LE_32(pwrk->htag);
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
	tmp = PMCS_DEVREG_TLR |
	    (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
	if (IS_ROOT_PHY(pptr)) {
		/* Direct-attached: include the phy number with the port id. */
		msg[2] = LE_32(pptr->portid |
		    (pptr->phynum << PMCS_PHYID_SHIFT));
	} else {
		msg[2] = LE_32(pptr->portid);
	}
	if (pptr->dtype == SATA) {
		if (IS_ROOT_PHY(pptr)) {
			tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
		} else {
			tmp |= PMCS_DEVREG_TYPE_SATA;
		}
	} else {
		tmp |= PMCS_DEVREG_TYPE_SAS;
	}
	msg[3] = LE_32(tmp);
	msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
	(void) memcpy(&msg[5], pptr->sas_address, 8);

	CLEAN_MESSAGE(msg, 7);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while we sleep awaiting the response. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		result = ETIMEDOUT;
		goto out;
	}
	/* Response word 2 is status; word 3 is the assigned device id. */
	status = LE_32(iomb[2]);
	tmp = LE_32(iomb[3]);
	switch (status) {
	case PMCS_DEVREG_OK:
	case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
	case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
		/* Make sure no other PHY already owns this device id. */
		if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
			result = EEXIST;
			goto out;
		} else if (status != PMCS_DEVREG_OK) {
			if (tmp == 0xffffffff) {	/* F/W bug */
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has bogus devid 0x%x",
				    __func__, pptr->path, tmp);
				result = EIO;
				goto out;
			} else {
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has a device id 0x%x",
				    __func__, pptr->path, tmp);
			}
		}
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to register device %s",
		    __func__, status, pptr->path);
		result = EIO;
		goto out;
	}
	/* Success: record the device handle on the PHY. */
	pptr->device_id = tmp;
	pptr->valid_device_id = 1;
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT
	    " registered with device_id 0x%x (portid %d)", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);
out:
	return (result);
}
1320 
1321 /*
1322  * Deregister a device (remove a device handle).
1323  * Called with PHY locked.
1324  */
void
pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	int result;

	/* Get a work structure we can wait on for the firmware response. */
	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		return;
	}

	/* Completion copies the response IOMB into iomb[] via pwrk->arg. */
	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		/* No inbound queue space; release the work structure. */
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		return;
	}
	/* Build the 3-word DEREGISTER_DEVICE_HANDLE request. */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	COPY_MESSAGE(ptr, msg, 3);
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while we sleep awaiting the response. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		return;
	}
	status = LE_32(iomb[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	} else {
		/* Success: invalidate the PHY's device handle state. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: device %s deregistered", __func__, pptr->path);
		pptr->valid_device_id = 0;
		pptr->device_id = PMCS_INVALID_DEVICE_ID;
		pptr->configured = 0;
		pptr->deregister_wait = 0;
	}
}
1378 
1379 /*
1380  * Deregister all registered devices.
1381  */
1382 void
1383 pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
1384 {
1385 	/*
1386 	 * Start at the maximum level and walk back to level 0.  This only
1387 	 * gets done during detach after all threads and timers have been
1388 	 * destroyed, so there's no need to hold the softstate or PHY lock.
1389 	 */
1390 	while (phyp) {
1391 		if (phyp->children) {
1392 			pmcs_deregister_devices(pwp, phyp->children);
1393 		}
1394 		if (phyp->valid_device_id) {
1395 			pmcs_deregister_device(pwp, phyp);
1396 		}
1397 		phyp = phyp->sibling;
1398 	}
1399 }
1400 
1401 /*
1402  * Perform a 'soft' reset on the PMC chip
1403  */
1404 int
1405 pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart)
1406 {
1407 	uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode;
1408 	pmcs_phy_t *pptr;
1409 	char *msg = NULL;
1410 	int i;
1411 
1412 	/*
1413 	 * Disable interrupts
1414 	 */
1415 	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
1416 	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
1417 
1418 	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__);
1419 
1420 	if (pwp->locks_initted) {
1421 		mutex_enter(&pwp->lock);
1422 	}
1423 	pwp->blocked = 1;
1424 
1425 	/*
1426 	 * Clear our softstate copies of the MSGU and IOP heartbeats.
1427 	 */
1428 	pwp->last_msgu_tick = pwp->last_iop_tick = 0;
1429 
1430 	/*
1431 	 * Step 1
1432 	 */
1433 	s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2);
1434 	if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) {
1435 		pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
1436 		pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
1437 		for (i = 0; i < 100; i++) {
1438 			s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
1439 			    PMCS_MSGU_HOST_SOFT_RESET_READY;
1440 			if (s2) {
1441 				break;
1442 			}
1443 			drv_usecwait(10000);
1444 		}
1445 		s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
1446 		    PMCS_MSGU_HOST_SOFT_RESET_READY;
1447 		if (s2 == 0) {
1448 			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1449 			    "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came "
1450 			    "ready", __func__);
1451 			pmcs_register_dump(pwp);
1452 			if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
1453 			    PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 ||
1454 			    (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
1455 			    PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) {
1456 				pwp->state = STATE_DEAD;
1457 				pwp->blocked = 0;
1458 				if (pwp->locks_initted) {
1459 					mutex_exit(&pwp->lock);
1460 				}
1461 				return (-1);
1462 			}
1463 		}
1464 	}
1465 
1466 	/*
1467 	 * Step 2
1468 	 */
1469 	pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0);
1470 	drv_usecwait(10);
1471 	pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0);
1472 	drv_usecwait(10);
1473 	pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0);
1474 	drv_usecwait(10);
1475 	pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT,
1476 	    pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT));
1477 	drv_usecwait(10);
1478 	pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0);
1479 	drv_usecwait(10);
1480 	pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT,
1481 	    pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT));
1482 	drv_usecwait(10);
1483 
1484 	sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
1485 	    PMCS_MSGU_AAP_SFR_PROGRESS;
1486 	sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS;
1487 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 "
1488 	    "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0),
1489 	    HST_SFT_RESET_SIG);
1490 	pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG);
1491 
1492 	/*
1493 	 * Step 3
1494 	 */
1495 	gsm = pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET);
1496 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
1497 	    gsm & ~PMCS_SOFT_RESET_BITS);
1498 	pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS);
1499 
1500 	/*
1501 	 * Step 4
1502 	 */
1503 	rapchk = pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN);
1504 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
1505 	    "%08x -> %08x", rapchk, 0);
1506 	pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0);
1507 	wapchk = pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN);
1508 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
1509 	    "%08x -> %08x", wapchk, 0);
1510 	pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0);
1511 	wdpchk = pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN);
1512 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
1513 	    "%08x -> %08x", wdpchk, 0);
1514 	pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0);
1515 
1516 	/*
1517 	 * Step 5
1518 	 */
1519 	drv_usecwait(100);
1520 
1521 	/*
1522 	 * Step 5.5 (Temporary workaround for 1.07.xx Beta)
1523 	 */
1524 	tsmode = pmcs_rd_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR);
1525 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x",
1526 	    tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
1527 	pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR,
1528 	    tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
1529 	drv_usecwait(10);
1530 
1531 	/*
1532 	 * Step 6
1533 	 */
1534 	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1535 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1536 	    spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1537 	pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
1538 	    spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1539 	drv_usecwait(10);
1540 
1541 	/*
1542 	 * Step 7
1543 	 */
1544 	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1545 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1546 	    spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
1547 	pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
1548 
1549 	/*
1550 	 * Step 8
1551 	 */
1552 	drv_usecwait(100);
1553 
1554 	/*
1555 	 * Step 9
1556 	 */
1557 	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1558 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1559 	    spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB));
1560 	pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB));
1561 
1562 	/*
1563 	 * Step 10
1564 	 */
1565 	drv_usecwait(100);
1566 
1567 	/*
1568 	 * Step 11
1569 	 */
1570 	gsm = pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET);
1571 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
1572 	    gsm | PMCS_SOFT_RESET_BITS);
1573 	pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS);
1574 	drv_usecwait(10);
1575 
1576 	/*
1577 	 * Step 12
1578 	 */
1579 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
1580 	    "%08x -> %08x", pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN),
1581 	    rapchk);
1582 	pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk);
1583 	drv_usecwait(10);
1584 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
1585 	    "%08x -> %08x", pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN),
1586 	    wapchk);
1587 	pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk);
1588 	drv_usecwait(10);
1589 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
1590 	    "%08x -> %08x", pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN),
1591 	    wapchk);
1592 	pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk);
1593 	drv_usecwait(10);
1594 
1595 	/*
1596 	 * Step 13
1597 	 */
1598 	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1599 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1600 	    spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1601 	pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
1602 	    spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1603 
1604 	/*
1605 	 * Step 14
1606 	 */
1607 	drv_usecwait(100);
1608 
1609 	/*
1610 	 * Step 15
1611 	 */
1612 	for (spc = 0, i = 0; i < 1000; i++) {
1613 		drv_usecwait(1000);
1614 		spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
1615 		if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) {
1616 			break;
1617 		}
1618 	}
1619 
1620 	if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) {
1621 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1622 		    "SFR didn't toggle (sfr 0x%x)", spc);
1623 		pwp->state = STATE_DEAD;
1624 		pwp->blocked = 0;
1625 		if (pwp->locks_initted) {
1626 			mutex_exit(&pwp->lock);
1627 		}
1628 		return (-1);
1629 	}
1630 
1631 	/*
1632 	 * Step 16
1633 	 */
1634 	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
1635 	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
1636 
1637 	/*
1638 	 * Wait for up to 5 seconds for AAP state to come either ready or error.
1639 	 */
1640 	for (i = 0; i < 50; i++) {
1641 		spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
1642 		    PMCS_MSGU_AAP_STATE_MASK;
1643 		if (spc == PMCS_MSGU_AAP_STATE_ERROR ||
1644 		    spc == PMCS_MSGU_AAP_STATE_READY) {
1645 			break;
1646 		}
1647 		drv_usecwait(100000);
1648 	}
1649 	spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
1650 	if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
1651 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1652 		    "soft reset failed (state 0x%x)", spc);
1653 		pwp->state = STATE_DEAD;
1654 		pwp->blocked = 0;
1655 		if (pwp->locks_initted) {
1656 			mutex_exit(&pwp->lock);
1657 		}
1658 		return (-1);
1659 	}
1660 
1661 	/* Clear the firmware log */
1662 	if (pwp->fwlogp) {
1663 		bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
1664 	}
1665 
1666 	/* Reset our queue indices and entries */
1667 	bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi));
1668 	bzero(pwp->last_iqci, sizeof (pwp->last_iqci));
1669 	for (i = 0; i < PMCS_NIQ; i++) {
1670 		if (pwp->iqp[i]) {
1671 			bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
1672 			pmcs_wr_iqpi(pwp, i, 0);
1673 			pmcs_wr_iqci(pwp, i, 0);
1674 		}
1675 	}
1676 	for (i = 0; i < PMCS_NOQ; i++) {
1677 		if (pwp->oqp[i]) {
1678 			bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
1679 			pmcs_wr_oqpi(pwp, i, 0);
1680 			pmcs_wr_oqci(pwp, i, 0);
1681 		}
1682 
1683 	}
1684 
1685 	if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING ||
1686 	    pwp->state == STATE_PROBING || pwp->locks_initted == 0) {
1687 		pwp->blocked = 0;
1688 		if (pwp->locks_initted) {
1689 			mutex_exit(&pwp->lock);
1690 		}
1691 		return (0);
1692 	}
1693 
1694 	/*
1695 	 * Return at this point if we dont need to startup.
1696 	 */
1697 	if (no_restart) {
1698 		return (0);
1699 	}
1700 
1701 	ASSERT(pwp->locks_initted != 0);
1702 
1703 	/*
1704 	 * Flush the target queues and clear each target's PHY
1705 	 */
1706 	if (pwp->targets) {
1707 		for (i = 0; i < pwp->max_dev; i++) {
1708 			pmcs_xscsi_t *xp = pwp->targets[i];
1709 
1710 			if (xp == NULL) {
1711 				continue;
1712 			}
1713 
1714 			mutex_enter(&xp->statlock);
1715 			pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
1716 			xp->phy = NULL;
1717 			mutex_exit(&xp->statlock);
1718 		}
1719 	}
1720 
1721 	/*
1722 	 * Zero out the ports list, free non root phys, clear root phys
1723 	 */
1724 	bzero(pwp->ports, sizeof (pwp->ports));
1725 	pmcs_free_all_phys(pwp, pwp->root_phys);
1726 	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
1727 		pmcs_lock_phy(pptr);
1728 		pmcs_clear_phy(pwp, pptr);
1729 		pptr->target = NULL;
1730 		pmcs_unlock_phy(pptr);
1731 	}
1732 
1733 	/*
1734 	 * Restore Interrupt Mask
1735 	 */
1736 	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
1737 	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
1738 
1739 	pwp->mpi_table_setup = 0;
1740 	mutex_exit(&pwp->lock);
1741 
1742 	/*
1743 	 * Set up MPI again.
1744 	 */
1745 	if (pmcs_setup(pwp)) {
1746 		msg = "unable to setup MPI tables again";
1747 		goto fail_restart;
1748 	}
1749 	pmcs_report_fwversion(pwp);
1750 
1751 	/*
1752 	 * Restart MPI
1753 	 */
1754 	if (pmcs_start_mpi(pwp)) {
1755 		msg = "unable to restart MPI again";
1756 		goto fail_restart;
1757 	}
1758 
1759 	mutex_enter(&pwp->lock);
1760 	SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1761 	mutex_exit(&pwp->lock);
1762 
1763 	/*
1764 	 * Run any completions
1765 	 */
1766 	PMCS_CQ_RUN(pwp);
1767 
1768 	/*
1769 	 * Delay
1770 	 */
1771 	drv_usecwait(1000000);
1772 	return (0);
1773 
1774 fail_restart:
1775 	mutex_enter(&pwp->lock);
1776 	pwp->state = STATE_DEAD;
1777 	mutex_exit(&pwp->lock);
1778 	pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
1779 	    "%s: Failed: %s", __func__, msg);
1780 	return (-1);
1781 }
1782 
1783 
1784 /*
1785  * Perform a 'hot' reset, which will soft reset the chip and
1786  * restore the state back to pre-reset context. Called with pwp
1787  * lock held.
1788  */
int
pmcs_hot_reset(pmcs_hw_t *pwp)
{
	pmcs_iport_t	*iport;

	ASSERT(mutex_owned(&pwp->lock));
	pwp->state = STATE_IN_RESET;

	/*
	 * For any iports on this HBA, report empty target sets and
	 * then tear them down.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		/* An empty begin/end pair retires all mapped targets. */
		(void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap);
		(void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0);
		pmcs_iport_teardown_phys(iport);
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	/* Grab a register dump, in the event that reset fails */
	pmcs_register_dump_int(pwp);
	/* pmcs_soft_reset() takes/drops pwp->lock itself; release ours. */
	mutex_exit(&pwp->lock);

	/* Issue soft reset and clean up related softstate */
	if (pmcs_soft_reset(pwp, B_FALSE)) {
		/*
		 * Disable interrupts, in case we got far enough along to
		 * enable them, then fire off ereport and service impact.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed soft reset", __func__);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		/* Re-acquire the lock for the caller before failing. */
		mutex_enter(&pwp->lock);
		pwp->state = STATE_DEAD;
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);
	pwp->state = STATE_RUNNING;
	mutex_exit(&pwp->lock);

	/*
	 * Finally, restart the phys, which will bring the iports back
	 * up and eventually result in discovery running.
	 */
	if (pmcs_start_phys(pwp)) {
		/* We should be up and running now, so retry */
		if (pmcs_start_phys(pwp)) {
			/* Apparently unable to restart PHYs, fail */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: failed to restart PHYs after soft reset",
			    __func__);
			mutex_enter(&pwp->lock);
			return (DDI_FAILURE);
		}
	}

	/* Return with pwp->lock held, as we entered. */
	mutex_enter(&pwp->lock);
	return (DDI_SUCCESS);
}
1856 
1857 /*
1858  * Reset a device or a logical unit.
1859  */
int
pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
{
	int rval = 0;

	if (pptr == NULL) {
		return (ENXIO);
	}

	pmcs_lock_phy(pptr);
	if (pptr->dtype == SAS) {
		/*
		 * Some devices do not support SAS_I_T_NEXUS_RESET as
		 * it is not a mandatory (in SAM4) task management
		 * function, while LOGIC_UNIT_RESET is mandatory.
		 *
		 * The problem here is that we need to iterate over
		 * all known LUNs to emulate the semantics of
		 * "RESET_TARGET".
		 *
		 * XXX: FIX ME
		 */
		if (lun == (uint64_t)-1) {
			lun = 0;
		}
		rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
		    NULL);
	} else if (pptr->dtype == SATA) {
		/* SATA has no LUNs; only a whole-device (link) reset. */
		if (lun != 0ull) {
			pmcs_unlock_phy(pptr);
			return (EINVAL);
		}
		rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
	} else {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: cannot reset a SMP device yet (%s)",
		    __func__, pptr->path);
		return (EINVAL);
	}

	/*
	 * Now harvest any commands killed by this action
	 * by issuing an ABORT for all commands on this device.
	 *
	 * We do this even if the tmf or reset fails (in case there
	 * are any dead commands around to be harvested *anyway*).
	 * We don't have to await for the abort to complete.
	 */
	if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
	}

	pmcs_unlock_phy(pptr);
	return (rval);
}
1917 
1918 /*
1919  * Called with PHY locked.
1920  */
static int
pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	/* If the PHY already has a valid device handle there is no work. */
	if (pptr->valid_device_id == 0) {
		int result = pmcs_register_device(pwp, pptr);

		/*
		 * If we changed while registering, punt
		 */
		if (pptr->changed) {
			RESTART_DISCOVERY(pwp);
			return (-1);
		}

		/*
		 * If we had a failure to register, check against errors.
		 * An ENOMEM error means we just retry (temp resource shortage).
		 */
		if (result == ENOMEM) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
			return (-1);
		}

		/*
		 * An ETIMEDOUT error means we retry (if our counter isn't
		 * exhausted)
		 */
		if (result == ETIMEDOUT) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				/* Still within the config window; retry. */
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pptr->config_stop = 0;
				pmcs_kill_changed(pwp, pptr, 0);
			}
			return (-1);
		}
		/*
		 * Other errors or no valid device id is fatal, but don't
		 * preclude a future action.
		 */
		if (result || pptr->valid_device_id == 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: %s could not be registered", __func__,
			    pptr->path);
			return (-1);
		}
	}
	return (0);
}
1975 
1976 int
1977 pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
1978 {
1979 	ASSERT(iport);
1980 	if (iport == NULL)
1981 		return (B_FALSE);
1982 
1983 	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);
1984 
1985 	/* create target map */
1986 	if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET, tgtmap_usec,
1987 	    (void *)iport, pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb,
1988 	    &iport->iss_tgtmap) != DDI_SUCCESS) {
1989 		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL,
1990 		    "%s: failed to create tgtmap", __func__);
1991 		return (B_FALSE);
1992 	}
1993 	return (B_TRUE);
1994 }
1995 
1996 int
1997 pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
1998 {
1999 	ASSERT(iport && iport->iss_tgtmap);
2000 	if ((iport == NULL) || (iport->iss_tgtmap == NULL))
2001 		return (B_FALSE);
2002 
2003 	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);
2004 
2005 	/* destroy target map */
2006 	scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
2007 	return (B_TRUE);
2008 }
2009 
2010 /*
 * Remove all phys from an iport's phymap and empty its phylist.
2012  * Called when a port has been reset by the host (see pmcs_intr.c)
2013  * or prior to issuing a soft reset if we detect a stall on the chip
2014  * (see pmcs_attach.c).
2015  */
void
pmcs_iport_teardown_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t		*pwp;
	sas_phymap_phys_t	*phys;
	int			phynum;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);

	/*
	 * Remove all phys from the iport handle's phy list, unset its
	 * primary phy and update its state.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	iport->pptr = NULL;
	iport->ua_state = UA_PEND_DEACTIVATE;

	/* Remove all phys from the phymap */
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	if (phys) {
		/* Iterate the unit-address's phy set, removing each one. */
		while ((phynum = sas_phymap_phys_next(phys)) != -1) {
			(void) sas_phymap_phy_rem(pwp->hss_phymap, phynum);
		}
		sas_phymap_phys_free(phys);
	}
}
2045 
2046 /*
2047  * Query the phymap and populate the iport handle passed in.
2048  * Called with iport lock held.
2049  */
int
pmcs_iport_configure_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t		*pwp;
	pmcs_phy_t		*pptr;
	sas_phymap_phys_t	*phys;
	int			phynum;
	int			inst;

	/* Caller must hold iport->lock (asserted below) */
	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);
	inst = ddi_get_instance(iport->dip);

	/* pwp->lock protects root_phys while we walk the phymap */
	mutex_enter(&pwp->lock);
	ASSERT(pwp->root_phys != NULL);

	/*
	 * Query the phymap regarding the phys in this iport and populate
	 * the iport's phys list. Hereafter this list is maintained via
	 * port up and down events in pmcs_intr.c
	 */
	ASSERT(list_is_empty(&iport->phys));
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	ASSERT(phys != NULL);
	while ((phynum = sas_phymap_phys_next(phys)) != -1) {
		/* Grab the phy pointer from root_phys */
		pptr = pwp->root_phys + phynum;	/* root_phys is indexed by phynum */
		ASSERT(pptr);
		pmcs_lock_phy(pptr);
		ASSERT(pptr->phynum == phynum);

		/*
		 * Set a back pointer in the phy to this iport.
		 */
		pptr->iport = iport;

		/*
		 * If this phy is the primary, set a pointer to it on our
		 * iport handle, and set our portid from it.
		 */
		if (!pptr->subsidiary) {
			iport->pptr = pptr;
			iport->portid = pptr->portid;
		}

		/*
		 * Finally, insert the phy into our list
		 */
		pmcs_unlock_phy(pptr);
		pmcs_add_phy_to_iport(iport, pptr);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found "
		    "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum,
		    (void *)pptr, inst, iport->refcnt);
	}
	mutex_exit(&pwp->lock);
	sas_phymap_phys_free(phys);
	/* New phys may need configuration; kick discovery */
	RESTART_DISCOVERY(pwp);
	return (DDI_SUCCESS);
}
2112 
2113 /*
2114  * Return the iport that ua is associated with, or NULL.  If an iport is
2115  * returned, it will be held and the caller must release the hold.
2116  */
2117 static pmcs_iport_t *
2118 pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
2119 {
2120 	pmcs_iport_t	*iport = NULL;
2121 
2122 	rw_enter(&pwp->iports_lock, RW_READER);
2123 	for (iport = list_head(&pwp->iports);
2124 	    iport != NULL;
2125 	    iport = list_next(&pwp->iports, iport)) {
2126 		mutex_enter(&iport->lock);
2127 		if (strcmp(iport->ua, ua) == 0) {
2128 			mutex_exit(&iport->lock);
2129 			mutex_enter(&iport->refcnt_lock);
2130 			iport->refcnt++;
2131 			mutex_exit(&iport->refcnt_lock);
2132 			break;
2133 		}
2134 		mutex_exit(&iport->lock);
2135 	}
2136 	rw_exit(&pwp->iports_lock);
2137 
2138 	return (iport);
2139 }
2140 
2141 /*
2142  * Return the iport that pptr is associated with, or NULL.
2143  * If an iport is returned, there is a hold that the caller must release.
2144  */
2145 pmcs_iport_t *
2146 pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
2147 {
2148 	pmcs_iport_t	*iport = NULL;
2149 	char		*ua;
2150 
2151 	ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
2152 	if (ua) {
2153 		iport = pmcs_get_iport_by_ua(pwp, ua);
2154 		if (iport) {
2155 			mutex_enter(&iport->lock);
2156 			pmcs_iport_active(iport);
2157 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
2158 			    "found iport [0x%p] on ua (%s), refcnt (%d)",
2159 			    __func__, (void *)iport, ua, iport->refcnt);
2160 			mutex_exit(&iport->lock);
2161 		}
2162 	}
2163 
2164 	return (iport);
2165 }
2166 
2167 /*
2168  * Promote the next phy on this port to primary, and return it.
2169  * Called when the primary PHY on a port is going down, but the port
2170  * remains up (see pmcs_intr.c).
2171  */
pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
{
	pmcs_hw_t	*pwp;
	pmcs_iport_t	*iport;
	pmcs_phy_t	*pptr, *child;
	int		portid;

	pmcs_lock_phy(prev_primary);
	portid = prev_primary->portid;
	iport  = prev_primary->iport;
	pwp    = prev_primary->pwp;

	/* Use the first available phy in this port */
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		if ((pptr->portid == portid) && (pptr != prev_primary)) {
			/* Taken directly; released below via pmcs_unlock_phy */
			mutex_enter(&pptr->phy_lock);
			break;
		}
	}

	/* No other phy shares this port; nothing to promote */
	if (pptr == NULL) {
		pmcs_unlock_phy(prev_primary);
		return (NULL);
	}

	/* Point the iport at its new primary phy */
	if (iport) {
		mutex_enter(&iport->lock);
		iport->pptr = pptr;
		mutex_exit(&iport->lock);
	}

	/* Update the phy handle with the data from the previous primary */
	pptr->children		= prev_primary->children;
	child = pptr->children;
	/* Reparent the inherited children onto the new primary */
	while (child) {
		child->parent = pptr;
		child = child->sibling;
	}
	pptr->ncphy		= prev_primary->ncphy;
	pptr->width		= prev_primary->width;
	pptr->dtype		= prev_primary->dtype;
	pptr->pend_dtype	= prev_primary->pend_dtype;
	pptr->tolerates_sas2	= prev_primary->tolerates_sas2;
	pptr->atdt		= prev_primary->atdt;
	pptr->portid		= prev_primary->portid;
	pptr->link_rate		= prev_primary->link_rate;
	pptr->configured	= prev_primary->configured;
	pptr->iport		= prev_primary->iport;
	pptr->target		= prev_primary->target;
	if (pptr->target) {
		/* Point the target back at its new phy */
		pptr->target->phy = pptr;
	}

	/* Update the phy mask properties for the affected PHYs */
	/* Clear the current values... */
	pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
	    pptr->tgt_port_pm_tmp, B_FALSE);
	/* ...replace with the values from prev_primary... */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_TRUE);
	/* ...then clear prev_primary's PHY values from the new primary */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm,
	    prev_primary->tgt_port_pm, B_FALSE);
	/* Clear the prev_primary's values */
	pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_FALSE);

	pptr->subsidiary = 0;

	/* The old primary becomes a childless subsidiary phy */
	prev_primary->subsidiary = 1;
	prev_primary->children = NULL;
	prev_primary->target = NULL;
	pptr->device_id = prev_primary->device_id;
	pptr->valid_device_id = 1;
	pmcs_unlock_phy(prev_primary);

	/*
	 * We call pmcs_unlock_phy() on pptr because it now contains the
	 * list of children.
	 */
	pmcs_unlock_phy(pptr);

	return (pptr);
}
2257 
2258 void
2259 pmcs_rele_iport(pmcs_iport_t *iport)
2260 {
2261 	/*
2262 	 * Release a refcnt on this iport. If this is the last reference,
2263 	 * signal the potential waiter in pmcs_iport_unattach().
2264 	 */
2265 	ASSERT(iport->refcnt > 0);
2266 	mutex_enter(&iport->refcnt_lock);
2267 	iport->refcnt--;
2268 	mutex_exit(&iport->refcnt_lock);
2269 	if (iport->refcnt == 0) {
2270 		cv_signal(&iport->refcnt_cv);
2271 	}
2272 	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: iport "
2273 	    "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt);
2274 }
2275 
void
pmcs_phymap_activate(void *arg, char *ua, void **privp)
{
	_NOTE(ARGUNUSED(privp));
	pmcs_hw_t	*pwp = arg;
	pmcs_iport_t	*iport = NULL;

	mutex_enter(&pwp->lock);
	/* Don't activate anything if the HBA is on its way out */
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
		mutex_exit(&pwp->lock);
		return;
	}
	pwp->phymap_active++;
	mutex_exit(&pwp->lock);

	/* Ask SCSA to create an iport node for this unit address */
	if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
		    "add iport handle on unit address [%s]", __func__, ua);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
		    "phymap_active count (%d), added iport handle on unit "
		    "address [%s]", __func__, pwp->phymap_active, ua);
	}

	/* Set the HBA softstate as our private data for this unit address */
	*privp = (void *)pwp;

	/*
	 * We are waiting on attach for this iport node, unless it is still
	 * attached. This can happen if a consumer has an outstanding open
	 * on our iport node, but the port is down.  If this is the case, we
	 * need to configure our iport here for reuse.
	 */
	iport = pmcs_get_iport_by_ua(pwp, ua);
	if (iport) {
		mutex_enter(&iport->lock);
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
			    "failed to configure phys on iport [0x%p] at "
			    "unit address (%s)", __func__, (void *)iport, ua);
		}
		/* Mark the iport active and publish its phy count */
		pmcs_iport_active(iport);
		pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
		    &iport->nphy);
		mutex_exit(&iport->lock);
		pmcs_rele_iport(iport);		/* drop hold from lookup */
	}

}
2326 
void
pmcs_phymap_deactivate(void *arg, char *ua, void *privp)
{
	_NOTE(ARGUNUSED(privp));
	pmcs_hw_t	*pwp = arg;
	pmcs_iport_t	*iport;

	mutex_enter(&pwp->lock);
	pwp->phymap_active--;
	mutex_exit(&pwp->lock);

	/* Ask SCSA to remove the iport node for this unit address */
	if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
		    "remove iport handle on unit address [%s]", __func__, ua);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
		    "phymap_active count (%d), removed iport handle on unit "
		    "address [%s]", __func__, pwp->phymap_active, ua);
	}

	iport = pmcs_get_iport_by_ua(pwp, ua);

	/* The iport may already be gone; nothing further to tear down */
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed "
		    "lookup of iport handle on unit addr (%s)", __func__, ua);
		return;
	}

	/* Deactivate the iport and detach its phys */
	mutex_enter(&iport->lock);
	iport->ua_state = UA_INACTIVE;
	iport->portid = PMCS_IPORT_INVALID_PORT_ID;
	pmcs_remove_phy_from_iport(iport, NULL);
	mutex_exit(&iport->lock);
	pmcs_rele_iport(iport);		/* drop hold from lookup */
}
2363 
2364 /*
2365  * Top-level discovery function
2366  */
void
pmcs_discover(pmcs_hw_t *pwp)
{
	pmcs_phy_t		*pptr;
	pmcs_phy_t		*root_phy;
	int			phymap_active;

	DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);

	mutex_enter(&pwp->lock);

	/* Discovery is only meaningful on a running HBA */
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return;
	}

	/* Ensure we have at least one phymap active */
	if (pwp->phymap_active == 0) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: phymap inactive, exiting", __func__);
		return;
	}

	/* Snapshot the count; pwp->lock is dropped below */
	phymap_active = pwp->phymap_active;
	mutex_exit(&pwp->lock);

	/*
	 * If no iports have attached, but we have PHYs that are up, we
	 * are waiting for iport attach to complete.  Restart discovery.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	if (!pwp->iports_attached) {
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: no iports attached, retry discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}
	/*
	 * An iport count that differs from the active phymap count means
	 * the maps are still settling; defer discovery until they agree.
	 */
	if (pwp->num_iports != phymap_active) {
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: phymaps or iport maps not stable; retry discovery",
		    __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}
	rw_exit(&pwp->iports_lock);

	/* Only one discovery/configuration pass may run at a time */
	mutex_enter(&pwp->config_lock);
	if (pwp->configuring) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: configuration already in progress", __func__);
		return;
	}

	/* Discovery needs the scratch area; reschedule if it's unavailable */
	if (pmcs_acquire_scratch(pwp, B_FALSE)) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: cannot allocate scratch", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}

	pwp->configuring = 1;
	pwp->config_changed = B_FALSE;
	mutex_exit(&pwp->config_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin");

	/*
	 * First, tell SCSA that we're beginning set operations.
	 */
	pmcs_begin_observations(pwp);

	/*
	 * The order of the following traversals is important.
	 *
	 * The first one checks for changed expanders.
	 *
	 * The second one aborts commands for dead devices and deregisters them.
	 *
	 * The third one clears the contents of dead expanders from the tree
	 *
	 * The fourth one clears now dead devices in expanders that remain.
	 */

	/*
	 * 1. Check expanders marked changed (but not dead) to see if they still
	 * have the same number of phys and the same SAS address. Mark them,
	 * their subsidiary phys (if wide) and their descendents dead if
	 * anything has changed. Check the devices they contain to see if
	 * *they* have changed. If they've changed from type NOTHING we leave
	 * them marked changed to be configured later (picking up a new SAS
	 * address and link rate if possible). Otherwise, any change in type,
	 * SAS address or removal of target role will cause us to mark them
	 * (and their descendents) as dead (and cause any pending commands
	 * and associated devices to be removed).
	 *
	 * NOTE: We don't want to bail on discovery if the config has
	 * changed until *after* we run pmcs_kill_devices.
	 */
	root_phy = pwp->root_phys;
	pmcs_check_expanders(pwp, root_phy);

	/*
	 * 2. Descend the tree looking for dead devices and kill them
	 * by aborting all active commands and then deregistering them.
	 */
	if (pmcs_kill_devices(pwp, root_phy)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: pmcs_kill_devices failed!", __func__);
	}

	/*
	 * 3. Check for dead expanders and remove their children from the tree.
	 * By the time we get here, the devices and commands for them have
	 * already been terminated and removed.
	 *
	 * We do this independent of the configuration count changing so we can
	 * free any dead device PHYs that were discovered while checking
	 * expanders. We ignore any subsidiary phys as pmcs_clear_expander
	 * will take care of those.
	 *
	 * NOTE: pmcs_clear_expander requires softstate lock
	 */
	mutex_enter(&pwp->lock);
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		/*
		 * Call pmcs_clear_expander for every root PHY.  It will
		 * recurse and determine which (if any) expanders actually
		 * need to be cleared.
		 */
		pmcs_lock_phy(pptr);
		pmcs_clear_expander(pwp, pptr, 0);
		pmcs_unlock_phy(pptr);
	}
	mutex_exit(&pwp->lock);

	/*
	 * 4. Check for dead devices and nullify them. By the time we get here,
	 * the devices and commands for them have already been terminated
	 * and removed. This is different from step 2 in that this just nulls
	 * phys that are part of expanders that are still here but used to
	 * be something but are no longer something (e.g., after a pulled
	 * disk drive). Note that dead expanders had their contained phys
	 * removed from the tree- here, the expanders themselves are
	 * nullified (unless they were removed by being contained in another
	 * expander phy).
	 */
	pmcs_clear_phys(pwp, root_phy);

	/*
	 * 5. Now check for and configure new devices.
	 */
	if (pmcs_configure_new_devices(pwp, root_phy)) {
		goto restart;
	}

	/* Common exit: report observations if stable, then release resources */
out:
	DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end");

	mutex_enter(&pwp->config_lock);

	if (pwp->config_changed == B_FALSE) {
		/*
		 * Observation is stable, report what we currently see to
		 * the tgtmaps for delta processing. Start by setting
		 * BEGIN on all tgtmaps.
		 */
		mutex_exit(&pwp->config_lock);
		if (pmcs_report_observations(pwp) == B_FALSE) {
			goto restart;
		}
		mutex_enter(&pwp->config_lock);
	} else {
		/*
		 * If config_changed is TRUE, we need to reschedule
		 * discovery now.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Config has changed, will re-run discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}

	pmcs_release_scratch(pwp);
	if (!pwp->quiesced) {
		pwp->blocked = 0;
	}
	pwp->configuring = 0;
	mutex_exit(&pwp->config_lock);

#ifdef DEBUG
	/* Sanity check: any phy still needing work should have work queued */
	pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys);
	if (pptr != NULL) {
		if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "PHY %s dead=%d changed=%d configured=%d "
			    "but no work scheduled", pptr->path, pptr->dead,
			    pptr->changed, pptr->configured);
		}
		pmcs_unlock_phy(pptr);
	}
#endif

	return;

restart:
	/* Clean up and restart discovery */
	pmcs_release_scratch(pwp);
	mutex_enter(&pwp->config_lock);
	pwp->configuring = 0;
	RESTART_DISCOVERY_LOCKED(pwp);
	mutex_exit(&pwp->config_lock);
}
2586 
2587 /*
2588  * Return any PHY that needs to have scheduled work done.  The PHY is returned
2589  * locked.
2590  */
2591 static pmcs_phy_t *
2592 pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
2593 {
2594 	pmcs_phy_t *cphyp, *pnext;
2595 
2596 	while (pptr) {
2597 		pmcs_lock_phy(pptr);
2598 
2599 		if (pptr->changed || (pptr->dead && pptr->valid_device_id)) {
2600 			return (pptr);
2601 		}
2602 
2603 		pnext = pptr->sibling;
2604 
2605 		if (pptr->children) {
2606 			cphyp = pptr->children;
2607 			pmcs_unlock_phy(pptr);
2608 			cphyp = pmcs_find_phy_needing_work(pwp, cphyp);
2609 			if (cphyp) {
2610 				return (cphyp);
2611 			}
2612 		} else {
2613 			pmcs_unlock_phy(pptr);
2614 		}
2615 
2616 		pptr = pnext;
2617 	}
2618 
2619 	return (NULL);
2620 }
2621 
2622 /*
2623  * We may (or may not) report observations to SCSA.  This is prefaced by
2624  * issuing a set_begin for each iport target map.
2625  */
2626 static void
2627 pmcs_begin_observations(pmcs_hw_t *pwp)
2628 {
2629 	pmcs_iport_t		*iport;
2630 	scsi_hba_tgtmap_t	*tgtmap;
2631 
2632 	rw_enter(&pwp->iports_lock, RW_READER);
2633 	for (iport = list_head(&pwp->iports); iport != NULL;
2634 	    iport = list_next(&pwp->iports, iport)) {
2635 		/*
2636 		 * Unless we have at least one phy up, skip this iport.
2637 		 * Note we don't need to lock the iport for report_skip
2638 		 * since it is only used here.  We are doing the skip so that
2639 		 * the phymap and iportmap stabilization times are honored -
2640 		 * giving us the ability to recover port operation within the
2641 		 * stabilization time without unconfiguring targets using the
2642 		 * port.
2643 		 */
2644 		if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
2645 			iport->report_skip = 1;
2646 			continue;		/* skip set_begin */
2647 		}
2648 		iport->report_skip = 0;
2649 
2650 		tgtmap = iport->iss_tgtmap;
2651 		ASSERT(tgtmap);
2652 		if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) {
2653 			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2654 			    "%s: cannot set_begin tgtmap ", __func__);
2655 			rw_exit(&pwp->iports_lock);
2656 			return;
2657 		}
2658 		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2659 		    "%s: set begin on tgtmap [0x%p]", __func__, (void *)tgtmap);
2660 	}
2661 	rw_exit(&pwp->iports_lock);
2662 }
2663 
2664 /*
2665  * Report current observations to SCSA.
2666  */
2667 static boolean_t
2668 pmcs_report_observations(pmcs_hw_t *pwp)
2669 {
2670 	pmcs_iport_t		*iport;
2671 	scsi_hba_tgtmap_t	*tgtmap;
2672 	char			*ap;
2673 	pmcs_phy_t		*pptr;
2674 	uint64_t		wwn;
2675 
2676 	/*
2677 	 * Observation is stable, report what we currently see to the tgtmaps
2678 	 * for delta processing.
2679 	 */
2680 	pptr = pwp->root_phys;
2681 
2682 	while (pptr) {
2683 		pmcs_lock_phy(pptr);
2684 
2685 		/*
2686 		 * Skip PHYs that have nothing attached or are dead.
2687 		 */
2688 		if ((pptr->dtype == NOTHING) || pptr->dead) {
2689 			pmcs_unlock_phy(pptr);
2690 			pptr = pptr->sibling;
2691 			continue;
2692 		}
2693 
2694 		if (pptr->changed) {
2695 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
2696 			    "%s: oops, PHY %s changed; restart discovery",
2697 			    __func__, pptr->path);
2698 			pmcs_unlock_phy(pptr);
2699 			return (B_FALSE);
2700 		}
2701 
2702 		/*
2703 		 * Get the iport for this root PHY, then call the helper
2704 		 * to report observations for this iport's targets
2705 		 */
2706 		wwn = pmcs_barray2wwn(pptr->sas_address);
2707 		pmcs_unlock_phy(pptr);
2708 		iport = pmcs_get_iport_by_wwn(pwp, wwn);
2709 		if (iport == NULL) {
2710 			/* No iport for this tgt */
2711 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2712 			    "%s: no iport for this target", __func__);
2713 			pptr = pptr->sibling;
2714 			continue;
2715 		}
2716 
2717 		pmcs_lock_phy(pptr);
2718 		if (!iport->report_skip) {
2719 			if (pmcs_report_iport_observations(
2720 			    pwp, iport, pptr) == B_FALSE) {
2721 				pmcs_rele_iport(iport);
2722 				pmcs_unlock_phy(pptr);
2723 				return (B_FALSE);
2724 			}
2725 		}
2726 		pmcs_rele_iport(iport);
2727 		pmcs_unlock_phy(pptr);
2728 		pptr = pptr->sibling;
2729 	}
2730 
2731 	/*
2732 	 * The observation is complete, end sets. Note we will skip any
2733 	 * iports that are active, but have no PHYs in them (i.e. awaiting
2734 	 * unconfigure). Set to restart discovery if we find this.
2735 	 */
2736 	rw_enter(&pwp->iports_lock, RW_READER);
2737 	for (iport = list_head(&pwp->iports);
2738 	    iport != NULL;
2739 	    iport = list_next(&pwp->iports, iport)) {
2740 
2741 		if (iport->report_skip)
2742 			continue;		/* skip set_end */
2743 
2744 		tgtmap = iport->iss_tgtmap;
2745 		ASSERT(tgtmap);
2746 		if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) {
2747 			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2748 			    "%s: cannot set_end tgtmap ", __func__);
2749 			rw_exit(&pwp->iports_lock);
2750 			return (B_FALSE);
2751 		}
2752 		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2753 		    "%s: set end on tgtmap [0x%p]", __func__, (void *)tgtmap);
2754 	}
2755 
2756 	/*
2757 	 * Now that discovery is complete, set up the necessary
2758 	 * DDI properties on each iport node.
2759 	 */
2760 	for (iport = list_head(&pwp->iports); iport != NULL;
2761 	    iport = list_next(&pwp->iports, iport)) {
2762 		/* Set up the 'attached-port' property on the iport */
2763 		ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
2764 		mutex_enter(&iport->lock);
2765 		pptr = iport->pptr;
2766 		mutex_exit(&iport->lock);
2767 		if (pptr == NULL) {
2768 			/*
2769 			 * This iport is down, but has not been
2770 			 * removed from our list (unconfigured).
2771 			 * Set our value to '0'.
2772 			 */
2773 			(void) snprintf(ap, 1, "%s", "0");
2774 		} else {
2775 			/* Otherwise, set it to remote phy's wwn */
2776 			pmcs_lock_phy(pptr);
2777 			wwn = pmcs_barray2wwn(pptr->sas_address);
2778 			(void) scsi_wwn_to_wwnstr(wwn, 1, ap);
2779 			pmcs_unlock_phy(pptr);
2780 		}
2781 		if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip,
2782 		    SCSI_ADDR_PROP_ATTACHED_PORT, ap) != DDI_SUCCESS) {
2783 			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed "
2784 			    "to set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")",
2785 			    __func__);
2786 		}
2787 		kmem_free(ap, PMCS_MAX_UA_SIZE);
2788 	}
2789 	rw_exit(&pwp->iports_lock);
2790 
2791 	return (B_TRUE);
2792 }
2793 
2794 /*
2795  * Report observations into a particular iport's target map
2796  *
2797  * Called with phyp (and all descendents) locked
2798  */
static boolean_t
pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport,
    pmcs_phy_t *phyp)
{
	pmcs_phy_t		*lphyp;
	scsi_hba_tgtmap_t	*tgtmap;
	scsi_tgtmap_tgt_type_t	tgt_type;
	char			*ua;
	uint64_t		wwn;

	tgtmap = iport->iss_tgtmap;
	ASSERT(tgtmap);

	lphyp = phyp;
	while (lphyp) {
		/* Map the phy's device type onto a SCSA target type */
		switch (lphyp->dtype) {
		default:		/* Skip unknown PHYs. */
			/* for non-root phys, skip to sibling */
			goto next_phy;

		case SATA:
		case SAS:
			tgt_type = SCSI_TGT_SCSI_DEVICE;
			break;

		case EXPANDER:
			tgt_type = SCSI_TGT_SMP_DEVICE;
			break;
		}

		/* Dead or not-yet-configured phys are not reported */
		if (lphyp->dead || !lphyp->configured) {
			goto next_phy;
		}

		/*
		 * Validate the PHY's SAS address
		 */
		if (((lphyp->sas_address[0] & 0xf0) >> 4) != NAA_IEEE_REG) {
			pmcs_prt(pwp, PMCS_PRT_ERR, lphyp, NULL,
			    "PHY 0x%p (%s) has invalid SAS address; "
			    "will not enumerate", (void *)lphyp, lphyp->path);
			goto next_phy;
		}

		/* Build the unit-address string for this phy's wwn */
		wwn = pmcs_barray2wwn(lphyp->sas_address);
		ua = scsi_wwn_to_wwnstr(wwn, 1, NULL);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, lphyp, NULL,
		    "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
		    ua, (void *)tgtmap, (void*)lphyp);

		if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) !=
		    DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,  NULL, NULL,
			    "%s: failed to add address %s", __func__, ua);
			scsi_free_wwnstr(ua);
			return (B_FALSE);
		}
		scsi_free_wwnstr(ua);

		/* Recurse to report this phy's descendants, if any */
		if (lphyp->children) {
			if (pmcs_report_iport_observations(pwp, iport,
			    lphyp->children) == B_FALSE) {
				return (B_FALSE);
			}
		}

		/* for non-root phys, report siblings too */
next_phy:
		if (IS_ROOT_PHY(lphyp)) {
			lphyp = NULL;
		} else {
			lphyp = lphyp->sibling;
		}
	}

	return (B_TRUE);
}
2877 
2878 /*
2879  * Check for and configure new devices.
2880  *
2881  * If the changed device is a SATA device, add a SATA device.
2882  *
2883  * If the changed device is a SAS device, add a SAS device.
2884  *
2885  * If the changed device is an EXPANDER device, do a REPORT
2886  * GENERAL SMP command to find out the number of contained phys.
2887  *
2888  * For each number of contained phys, allocate a phy, do a
2889  * DISCOVERY SMP command to find out what kind of device it
2890  * is and add it to the linked list of phys on the *next* level.
2891  *
2892  * NOTE: pptr passed in by the caller will be a root PHY
2893  */
static int
pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int rval = 0;
	pmcs_iport_t *iport;
	pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild;
	uint64_t wwn;

	/*
	 * First, walk through each PHY at this level
	 */
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;

		/*
		 * Set the new dtype if it has changed
		 */
		if ((pptr->pend_dtype != NEW) &&
		    (pptr->pend_dtype != pptr->dtype)) {
			pptr->dtype = pptr->pend_dtype;
		}

		/* Only changed, live, unconfigured phys need attention */
		if (pptr->changed == 0 || pptr->dead || pptr->configured) {
			goto next_phy;
		}

		/*
		 * Confirm that this target's iport is configured
		 */
		root_phy = pmcs_get_root_phy(pptr);
		wwn = pmcs_barray2wwn(root_phy->sas_address);
		pmcs_unlock_phy(pptr);	/* drop phy lock across iport lookup */
		iport = pmcs_get_iport_by_wwn(pwp, wwn);
		if (iport == NULL) {
			/* No iport for this tgt, restart */
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: iport not yet configured, "
			    "retry discovery", __func__);
			pnext = NULL;	/* terminate this level's walk */
			rval = -1;
			pmcs_lock_phy(pptr);	/* next_phy expects it locked */
			goto next_phy;
		}

		pmcs_lock_phy(pptr);
		/* Configure the phy according to its (possibly new) type */
		switch (pptr->dtype) {
		case NOTHING:
			pptr->changed = 0;
			break;
		case SATA:
		case SAS:
			pptr->iport = iport;
			pmcs_new_tport(pwp, pptr);
			break;
		case EXPANDER:
			pmcs_configure_expander(pwp, pptr, iport);
			break;
		}
		pmcs_rele_iport(iport);	/* drop hold from lookup above */

		/* If the config changed underneath us, stop this pass */
		mutex_enter(&pwp->config_lock);
		if (pwp->config_changed) {
			mutex_exit(&pwp->config_lock);
			pnext = NULL;
			goto next_phy;
		}
		mutex_exit(&pwp->config_lock);

next_phy:
		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	if (rval != 0) {
		return (rval);
	}

	/*
	 * Now walk through each PHY again, recalling ourselves if they
	 * have children
	 */
	pptr = orig_pptr;
	while (pptr) {
		pmcs_lock_phy(pptr);
		pnext = pptr->sibling;
		pchild = pptr->children;
		pmcs_unlock_phy(pptr);

		if (pchild) {
			rval = pmcs_configure_new_devices(pwp, pchild);
			if (rval != 0) {
				break;
			}
		}

		pptr = pnext;
	}

	return (rval);
}
2995 
2996 /*
2997  * Set all phys and descendent phys as changed if changed == B_TRUE, otherwise
2998  * mark them all as not changed.
2999  *
3000  * Called with parent PHY locked.
3001  */
3002 void
3003 pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed,
3004     int level)
3005 {
3006 	pmcs_phy_t *pptr;
3007 
3008 	if (level == 0) {
3009 		if (changed) {
3010 			PHY_CHANGED(pwp, parent);
3011 		} else {
3012 			parent->changed = 0;
3013 		}
3014 		if (parent->dtype == EXPANDER && parent->level) {
3015 			parent->width = 1;
3016 		}
3017 		if (parent->children) {
3018 			pmcs_set_changed(pwp, parent->children, changed,
3019 			    level + 1);
3020 		}
3021 	} else {
3022 		pptr = parent;
3023 		while (pptr) {
3024 			if (changed) {
3025 				PHY_CHANGED(pwp, pptr);
3026 			} else {
3027 				pptr->changed = 0;
3028 			}
3029 			if (pptr->dtype == EXPANDER && pptr->level) {
3030 				pptr->width = 1;
3031 			}
3032 			if (pptr->children) {
3033 				pmcs_set_changed(pwp, pptr->children, changed,
3034 				    level + 1);
3035 			}
3036 			pptr = pptr->sibling;
3037 		}
3038 	}
3039 }
3040 
3041 /*
3042  * Take the passed phy mark it and its descendants as dead.
3043  * Fire up reconfiguration to abort commands and bury it.
3044  *
3045  * Called with the parent PHY locked.
3046  */
void
pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level)
{
	pmcs_phy_t *pptr = parent;

	while (pptr) {
		/*
		 * Quiesce the phy: drop the link rate and arrange for any
		 * outstanding commands to be aborted.
		 */
		pptr->link_rate = 0;
		pptr->abort_sent = 0;
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		pptr->need_rl_ext = 0;

		/* Only schedule rediscovery on the first transition to dead */
		if (pptr->dead == 0) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		}

		pptr->dead = 1;

		/* Recurse so all descendants are marked dead as well */
		if (pptr->children) {
			pmcs_kill_changed(pwp, pptr->children, level + 1);
		}

		/*
		 * Only kill siblings at level > 0
		 */
		if (level == 0) {
			return;
		}

		pptr = pptr->sibling;
	}
}
3080 
3081 /*
3082  * Go through every PHY and clear any that are dead (unless they're expanders)
3083  */
3084 static void
3085 pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
3086 {
3087 	pmcs_phy_t *pnext, *phyp;
3088 
3089 	phyp = pptr;
3090 	while (phyp) {
3091 		if (IS_ROOT_PHY(phyp)) {
3092 			pmcs_lock_phy(phyp);
3093 		}
3094 
3095 		if ((phyp->dtype != EXPANDER) && phyp->dead) {
3096 			pmcs_clear_phy(pwp, phyp);
3097 		}
3098 
3099 		if (phyp->children) {
3100 			pmcs_clear_phys(pwp, phyp->children);
3101 		}
3102 
3103 		pnext = phyp->sibling;
3104 
3105 		if (IS_ROOT_PHY(phyp)) {
3106 			pmcs_unlock_phy(phyp);
3107 		}
3108 
3109 		phyp = pnext;
3110 	}
3111 }
3112 
3113 /*
3114  * Clear volatile parts of a phy.  Called with PHY locked.
3115  */
void
pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: %s",
	    __func__, pptr->path);
	ASSERT(mutex_owned(&pptr->phy_lock));
	/* keep sibling */
	/* keep children */
	/* keep parent */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	/* keep hw_event_ack */
	pptr->ncphy = 0;
	/* keep phynum */
	pptr->width = 0;
	/* Reset device-state recovery bookkeeping */
	pptr->ds_recovery_retries = 0;
	pptr->ds_prev_good_recoveries = 0;
	pptr->last_good_recovery = 0;
	pptr->prev_recovery = 0;

	/* keep dtype */
	pptr->config_stop = 0;
	pptr->spinup_hold = 0;
	pptr->atdt = 0;
	/* keep portid */
	pptr->link_rate = 0;
	pptr->valid_device_id = 0;
	pptr->abort_sent = 0;
	pptr->abort_pending = 0;
	pptr->need_rl_ext = 0;
	pptr->subsidiary = 0;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->reenumerate = 0;
	/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
	/* XXX: What about directly attached disks? */
	if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
		pptr->dead = 1;
	pptr->changed = 0;
	/* keep SAS address */
	/* keep path */
	/* keep ref_count */
	/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
	if (!IS_ROOT_PHY(pptr)) {
		/* Remember the iport we were on before it's cleared */
		pptr->last_iport = pptr->iport;
		pptr->iport = NULL;
	}
	/* keep target */
}
3164 
3165 /*
3166  * Allocate softstate for this target if there isn't already one.  If there
3167  * is, just redo our internal configuration.  If it is actually "new", we'll
3168  * soon get a tran_tgt_init for it.
3169  *
3170  * Called with PHY locked.
3171  */
static void
pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: phy 0x%p @ %s",
	    __func__, (void *)pptr, pptr->path);

	if (pmcs_configure_phy(pwp, pptr) == B_FALSE) {
		/*
		 * If the config failed, mark the PHY as changed.
		 */
		PHY_CHANGED(pwp, pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: pmcs_configure_phy failed for phy 0x%p", __func__,
		    (void *)pptr);
		return;
	}

	/* Mark PHY as no longer changed */
	pptr->changed = 0;

	/*
	 * If the PHY has no target pointer:
	 *
	 * If it's a root PHY, see if another PHY in the iport holds the
	 * target pointer (primary PHY changed).  If so, move it over.
	 *
	 * If it's not a root PHY, see if there's a PHY on the dead_phys
	 * list that matches.
	 */
	if (pptr->target == NULL) {
		if (IS_ROOT_PHY(pptr)) {
			pmcs_phy_t *rphy = pwp->root_phys;

			while (rphy) {
				/* Skip ourselves */
				if (rphy == pptr) {
					rphy = rphy->sibling;
					continue;
				}

				/*
				 * Move the target only from a sibling root
				 * PHY on the same iport that still holds one.
				 */
				mutex_enter(&rphy->phy_lock);
				if ((rphy->iport == pptr->iport) &&
				    (rphy->target != NULL)) {
					mutex_enter(&rphy->target->statlock);
					pptr->target = rphy->target;
					rphy->target = NULL;
					pptr->target->phy = pptr;
					/* The target is now on pptr */
					mutex_exit(&pptr->target->statlock);
					mutex_exit(&rphy->phy_lock);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    pptr, pptr->target,
					    "%s: Moved target from %s to %s",
					    __func__, rphy->path, pptr->path);
					break;
				}
				mutex_exit(&rphy->phy_lock);

				rphy = rphy->sibling;
			}
		} else {
			pmcs_reap_dead_phy(pptr);
		}
	}

	/*
	 * Only assign the device if there is a target for this PHY with a
	 * matching SAS address.  If an iport is disconnected from one piece
	 * of storage and connected to another within the iport stabilization
	 * time, we can get the PHY/target mismatch situation.
	 *
	 * Otherwise, it'll get done in tran_tgt_init.
	 */
	if (pptr->target) {
		mutex_enter(&pptr->target->statlock);
		if (pmcs_phy_target_match(pptr) == B_FALSE) {
			mutex_exit(&pptr->target->statlock);
			/* Drop a PHY reference on non-root PHYs */
			if (!IS_ROOT_PHY(pptr)) {
				pmcs_dec_phy_ref_count(pptr);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Not assigning existing tgt %p for PHY %p "
			    "(WWN mismatch)", __func__, (void *)pptr->target,
			    (void *)pptr);
			pptr->target = NULL;
			return;
		}

		if (!pmcs_assign_device(pwp, pptr->target)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: pmcs_assign_device failed for target 0x%p",
			    __func__, (void *)pptr->target);
		}
		mutex_exit(&pptr->target->statlock);
	}
}
3267 
3268 /*
3269  * Called with PHY lock held.
3270  */
3271 static boolean_t
3272 pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
3273 {
3274 	char *dtype;
3275 
3276 	ASSERT(mutex_owned(&pptr->phy_lock));
3277 
3278 	/*
3279 	 * Mark this device as no longer changed.
3280 	 */
3281 	pptr->changed = 0;
3282 
3283 	/*
3284 	 * If we don't have a device handle, get one.
3285 	 */
3286 	if (pmcs_get_device_handle(pwp, pptr)) {
3287 		return (B_FALSE);
3288 	}
3289 
3290 	pptr->configured = 1;
3291 
3292 	switch (pptr->dtype) {
3293 	case SAS:
3294 		dtype = "SAS";
3295 		break;
3296 	case SATA:
3297 		dtype = "SATA";
3298 		break;
3299 	case EXPANDER:
3300 		dtype = "SMP";
3301 		break;
3302 	default:
3303 		dtype = "???";
3304 	}
3305 
3306 	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "config_dev: %s "
3307 	    "dev %s " SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path,
3308 	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);
3309 
3310 	return (B_TRUE);
3311 }
3312 
3313 /*
3314  * Called with PHY locked
3315  */
static void
pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport)
{
	pmcs_phy_t *ctmp, *clist = NULL, *cnext;
	int result, i, nphy = 0;
	boolean_t root_phy = B_FALSE;

	ASSERT(iport);

	/*
	 * Step 1- clear our "changed" bit. If we need to retry/restart due
	 * to resource shortages, we'll set it again. While we're doing
	 * configuration, other events may set it again as well.  If the PHY
	 * is a root PHY and is currently marked as having changed, reset the
	 * config_stop timer as well.
	 */
	if (IS_ROOT_PHY(pptr) && pptr->changed) {
		pptr->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
	}
	pptr->changed = 0;

	/*
	 * Step 2- make sure we don't overflow
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		pmcs_prt(pwp, PMCS_PRT_WARN, pptr, NULL,
		    "%s: SAS expansion tree too deep", __func__);
		return;
	}

	/*
	 * Step 3- Check if this expander is part of a wide phy that has
	 * already been configured.
	 *
	 * This is known by checking this level for another EXPANDER device
	 * with the same SAS address and isn't already marked as a subsidiary
	 * phy and a parent whose SAS address is the same as our SAS address
	 * (if there are parents).
	 */
	if (!IS_ROOT_PHY(pptr)) {
		/*
		 * No need to lock the parent here because we're in discovery
		 * and the only time a PHY's children pointer can change is
		 * in discovery; either in pmcs_clear_expander (which has
		 * already been called) or here, down below.  Plus, trying to
		 * grab the parent's lock here can cause deadlock.
		 */
		ctmp = pptr->parent->children;
	} else {
		ctmp = pwp->root_phys;
		root_phy = B_TRUE;
	}

	while (ctmp) {
		/*
		 * If we've checked all PHYs up to pptr, we stop. Otherwise,
		 * we'll be checking for a primary PHY with a higher PHY
		 * number than pptr, which will never happen.  The primary
		 * PHY on non-root expanders will ALWAYS be the lowest
		 * numbered PHY.
		 */
		if (ctmp == pptr) {
			break;
		}

		/*
		 * If pptr and ctmp are root PHYs, just grab the mutex on
		 * ctmp.  No need to lock the entire tree.  If they are not
		 * root PHYs, there is no need to lock since a non-root PHY's
		 * SAS address and other characteristics can only change in
		 * discovery anyway.
		 */
		if (root_phy) {
			mutex_enter(&ctmp->phy_lock);
		}

		if (ctmp->dtype == EXPANDER && ctmp->width &&
		    memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
			int widephy = 0;
			/*
			 * If these phys are not root PHYs, compare their SAS
			 * addresses too.
			 */
			if (!root_phy) {
				if (memcmp(ctmp->parent->sas_address,
				    pptr->parent->sas_address, 8) == 0) {
					widephy = 1;
				}
			} else {
				widephy = 1;
			}
			if (widephy) {
				/* pptr joins ctmp's wide port as subsidiary */
				ctmp->width++;
				pptr->subsidiary = 1;

				/*
				 * Update the primary PHY's attached-port-pm
				 * and target-port-pm information with the info
				 * from this subsidiary
				 */
				pmcs_update_phy_pm_props(ctmp,
				    pptr->att_port_pm_tmp,
				    pptr->tgt_port_pm_tmp, B_TRUE);

				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: PHY %s part of wide PHY %s "
				    "(now %d wide)", __func__, pptr->path,
				    ctmp->path, ctmp->width);
				if (root_phy) {
					mutex_exit(&ctmp->phy_lock);
				}
				return;
			}
		}

		cnext = ctmp->sibling;
		if (root_phy) {
			mutex_exit(&ctmp->phy_lock);
		}
		ctmp = cnext;
	}

	/*
	 * Step 4- If we don't have a device handle, get one.  Since this
	 * is the primary PHY, make sure subsidiary is cleared.
	 */
	pptr->subsidiary = 0;
	pptr->iport = iport;
	if (pmcs_get_device_handle(pwp, pptr)) {
		goto out;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Config expander %s "
	    SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	/*
	 * Step 5- figure out how many phys are in this expander.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		/* 0 means a resource shortage: retry until config_stop */
		if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		goto out;
	}

	/*
	 * Step 6- Allocate a list of phys for this expander and figure out
	 * what each one is.
	 */
	for (i = 0; i < nphy; i++) {
		ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
		bzero(ctmp, sizeof (pmcs_phy_t));
		ctmp->device_id = PMCS_INVALID_DEVICE_ID;
		ctmp->sibling = clist;
		ctmp->pend_dtype = NEW;	/* Init pending dtype */
		ctmp->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
		clist = ctmp;
	}

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		/*
		 * Clean up the newly allocated PHYs and return
		 */
		while (clist) {
			ctmp = clist->sibling;
			kmem_cache_free(pwp->phy_cache, clist);
			clist = ctmp;
		}
		return;
	}
	mutex_exit(&pwp->config_lock);

	/*
	 * Step 7- Now fill in the rest of the static portions of the phy.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		ctmp->parent = pptr;
		ctmp->pwp = pwp;
		ctmp->level = pptr->level+1;
		ctmp->portid = pptr->portid;
		if (ctmp->tolerates_sas2) {
			ASSERT(i < SAS2_PHYNUM_MAX);
			ctmp->phynum = i & SAS2_PHYNUM_MASK;
		} else {
			ASSERT(i < SAS_PHYNUM_MAX);
			ctmp->phynum = i & SAS_PHYNUM_MASK;
		}
		pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
		/* Lock each new child; unlocked at "out" or by later code */
		pmcs_lock_phy(ctmp);
	}

	/*
	 * Step 8- Discover things about each phy in the expander.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		result = pmcs_expander_content_discover(pwp, pptr, ctmp);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}
			goto out;
		}

		/* Set pend_dtype to dtype for 1st time initialization */
		ctmp->pend_dtype = ctmp->dtype;
	}

	/*
	 * Step 9: Install the new list on the next level. There should
	 * typically be no children pointer on this PHY.  There is one known
	 * case where this can happen, though.  If a root PHY goes down and
	 * comes back up before discovery can run, we will fail to remove the
	 * children from that PHY since it will no longer be marked dead.
	 * However, in this case, all children should also be marked dead.  If
	 * we see that, take those children and put them on the dead_phys list.
	 */
	if (pptr->children != NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Expander @ %s still has children: Clean up",
		    __func__, pptr->path);
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	/*
	 * Set the new children pointer for this expander
	 */
	pptr->children = clist;
	clist = NULL;
	pptr->ncphy = nphy;
	pptr->configured = 1;

	/*
	 * We only set width if we're greater than level 0.
	 */
	if (pptr->level) {
		pptr->width = 1;
	}

	/*
	 * Now tell the rest of the world about us, as an SMP node.
	 */
	pptr->iport = iport;
	pmcs_new_tport(pwp, pptr);

out:
	/*
	 * Free any PHYs still on clist.  clist is non-NULL here only if
	 * step 8 failed: those PHYs were locked in step 7, so unlock each
	 * one before freeing.  Earlier failures (steps 4/5) arrive with
	 * clist == NULL, and on success clist was handed off to
	 * pptr->children and reset to NULL above.
	 */
	while (clist) {
		ctmp = clist->sibling;
		pmcs_unlock_phy(clist);
		kmem_cache_free(pwp->phy_cache, clist);
		clist = ctmp;
	}
}
3588 
3589 /*
 * 2. Check expanders marked changed (but not dead) to see if they still have
 * the same number of phys and the same SAS address. Mark them, their subsidiary
 * phys (if wide) and their descendants dead if anything has changed. Check
 * the devices they contain to see if *they* have changed. If they've changed
 * from type NOTHING we leave them marked changed to be configured later
 * (picking up a new SAS address and link rate if possible). Otherwise, any
 * change in type, SAS address or removal of target role will cause us to
 * mark them (and their descendants) as dead and cause any pending commands
 * and associated devices to be removed.
3599  *
3600  * Called with PHY (pptr) locked.
3601  */
3602 
static void
pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int nphy, result;
	pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
	boolean_t kill_changed, changed;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: check %s", __func__, pptr->path);

	/*
	 * Step 1: Mark phy as not changed. We will mark it changed if we need
	 * to retry.
	 */
	pptr->changed = 0;

	/*
	 * Reset the config_stop time. Although we're not actually configuring
	 * anything here, we do want some indication of when to give up trying
	 * if we can't communicate with the expander.
	 */
	pptr->config_stop = ddi_get_lbolt() +
	    drv_usectohz(PMCS_MAX_CONFIG_TIME);

	/*
	 * Step 2: Figure out how many phys are in this expander. If
	 * pmcs_expander_get_nphy returns 0 we ran out of resources,
	 * so reschedule and try later. If it returns another error,
	 * just return.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pptr->config_stop = 0;
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return;
	}

	/*
	 * Step 3: If the number of phys don't agree, kill the old sub-tree.
	 */
	if (nphy != pptr->ncphy) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: number of contained phys for %s changed from %d to %d",
		    __func__, pptr->path, pptr->ncphy, nphy);
		/*
		 * Force a rescan of this expander after dead contents
		 * are cleared and removed.
		 */
		pmcs_kill_changed(pwp, pptr, 0);
		return;
	}

	/*
	 * Step 4: if we're at the bottom of the stack, we're done
	 * (we can't have any levels below us)
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		return;
	}

	/*
	 * Step 5: Discover things about each phy in this expander.  We do
	 * this by walking the current list of contained phys and doing a
	 * content discovery for it to a local phy.
	 */
	ctmp = pptr->children;
	/* Defensive NULL check for non-DEBUG builds; ASSERT covers DEBUG */
	ASSERT(ctmp);
	if (ctmp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: No children attached to expander @ %s?", __func__,
		    pptr->path);
		return;
	}

	while (ctmp) {
		/*
		 * Allocate a local PHY to contain the proposed new contents
		 * and link it to the rest of the local PHYs so that they
		 * can all be freed later.
		 */
		local = pmcs_clone_phy(ctmp);

		if (local_list == NULL) {
			local_list = local;
			local_tail = local;
		} else {
			local_tail->sibling = local;
			local_tail = local;
		}

		/*
		 * Need to lock the local PHY since pmcs_expander_content_
		 * discovery may call pmcs_clear_phy on it, which expects
		 * the PHY to be locked.
		 */
		pmcs_lock_phy(local);
		result = pmcs_expander_content_discover(pwp, pptr, local);
		pmcs_unlock_phy(local);
		if (result <= 0) {
			/* 0 == resource shortage: retry until config_stop */
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}

			/*
			 * Release all the local PHYs that we allocated.
			 */
			pmcs_free_phys(pwp, local_list);
			return;
		}

		ctmp = ctmp->sibling;
	}

	/*
	 * Step 6: Compare the local PHY's contents to our current PHY.  If
	 * there are changes, take the appropriate action.
	 * This is done in two steps (step 5 above, and 6 here) so that if we
	 * have to bail during this process (e.g. pmcs_expander_content_discover
	 * fails), we haven't actually changed the state of any of the real
	 * PHYs.  Next time we come through here, we'll be starting over from
	 * scratch.  This keeps us from marking a changed PHY as no longer
	 * changed, but then having to bail only to come back next time and
	 * think that the PHY hadn't changed.  If this were to happen, we
	 * would fail to properly configure the device behind this PHY.
	 */
	local = local_list;
	ctmp = pptr->children;

	while (ctmp) {
		changed = B_FALSE;
		kill_changed = B_FALSE;

		/*
		 * We set local to local_list prior to this loop so that we
		 * can simply walk the local_list while we walk this list.  The
		 * two lists should be completely in sync.
		 *
		 * Clear the changed flag here.
		 */
		ctmp->changed = 0;

		if (ctmp->dtype != local->dtype) {
			if (ctmp->dtype != NOTHING) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from %s to %s "
				    "(killing)", __func__, ctmp->path,
				    PHY_TYPE(ctmp), PHY_TYPE(local));
				/*
				 * Force a rescan of this expander after dead
				 * contents are cleared and removed.
				 */
				changed = B_TRUE;
				kill_changed = B_TRUE;
			} else {
				changed = B_TRUE;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from NOTHING to %s",
				    __func__, ctmp->path, PHY_TYPE(local));
				/*
				 * Since this PHY was nothing and is now
				 * something, reset the config_stop timer.
				 */
				ctmp->config_stop = ddi_get_lbolt() +
				    drv_usectohz(PMCS_MAX_CONFIG_TIME);
			}

		} else if (ctmp->atdt != local->atdt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, "%s: "
			    "%s attached device type changed from %d to %d "
			    "(killing)", __func__, ctmp->path, ctmp->atdt,
			    local->atdt);
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;

			if (local->atdt == 0) {
				kill_changed = B_TRUE;
			}
		} else if (ctmp->link_rate != local->link_rate) {
			pmcs_prt(pwp, PMCS_PRT_INFO, ctmp, NULL, "%s: %s "
			    "changed speed from %s to %s", __func__, ctmp->path,
			    pmcs_get_rate(ctmp->link_rate),
			    pmcs_get_rate(local->link_rate));
			/* If the speed changed from invalid, force rescan */
			if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
				changed = B_TRUE;
				RESTART_DISCOVERY(pwp);
			} else {
				/* Just update to the new link rate */
				ctmp->link_rate = local->link_rate;
			}

			if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
				kill_changed = B_TRUE;
			}
		} else if (memcmp(ctmp->sas_address, local->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: SAS Addr for %s changed from " SAS_ADDR_FMT
			    "to " SAS_ADDR_FMT " (kill old tree)", __func__,
			    ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
			    SAS_ADDR_PRT(local->sas_address));
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: %s looks the same (type %s)",
			    __func__, ctmp->path, PHY_TYPE(ctmp));
			/*
			 * If EXPANDER, still mark it changed so we
			 * re-evaluate its contents.  If it's not an expander,
			 * but it hasn't been configured, also mark it as
			 * changed so that it will undergo configuration.
			 */
			if (ctmp->dtype == EXPANDER) {
				changed = B_TRUE;
			} else if ((ctmp->dtype != NOTHING) &&
			    !ctmp->configured) {
				ctmp->changed = 1;
			} else {
				/* It simply hasn't changed */
				ctmp->changed = 0;
			}
		}

		/*
		 * If the PHY changed, call pmcs_kill_changed if indicated,
		 * update its contents to reflect its current state and mark it
		 * as changed.
		 */
		if (changed) {
			/*
			 * pmcs_kill_changed will mark the PHY as changed, so
			 * only do PHY_CHANGED if we did not do kill_changed.
			 */
			if (kill_changed) {
				pmcs_kill_changed(pwp, ctmp, 0);
			} else {
				/*
				 * If we're not killing the device, it's not
				 * dead.  Mark the PHY as changed.
				 */
				PHY_CHANGED(pwp, ctmp);

				if (ctmp->dead) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    ctmp, NULL, "%s: Unmarking PHY %s "
					    "dead, restarting discovery",
					    __func__, ctmp->path);
					ctmp->dead = 0;
					RESTART_DISCOVERY(pwp);
				}
			}

			/*
			 * If the dtype of this PHY is now NOTHING, mark it as
			 * unconfigured.  Set pend_dtype to what the new dtype
			 * is.  It'll get updated at the end of the discovery
			 * process.
			 */
			if (local->dtype == NOTHING) {
				bzero(ctmp->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = 0;
				ctmp->link_rate = 0;
				ctmp->pend_dtype = NOTHING;
				ctmp->configured = 0;
			} else {
				(void) memcpy(ctmp->sas_address,
				    local->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = local->atdt;
				ctmp->link_rate = local->link_rate;
				ctmp->pend_dtype = local->dtype;
			}
		}

		/* Advance both lists in lock-step */
		local = local->sibling;
		ctmp = ctmp->sibling;
	}

	/*
	 * If we got to here, that means we were able to see all the PHYs
	 * and we can now update all of the real PHYs with the information
	 * we got on the local PHYs.  Once that's done, free all the local
	 * PHYs.
	 */

	pmcs_free_phys(pwp, local_list);
}
3913 
3914 /*
3915  * Top level routine to check expanders.  We call pmcs_check_expander for
3916  * each expander.  Since we're not doing any configuration right now, it
3917  * doesn't matter if this is breadth-first.
3918  */
3919 static void
3920 pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
3921 {
3922 	pmcs_phy_t *phyp, *pnext, *pchild;
3923 
3924 	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
3925 	    "%s: %s", __func__, pptr->path);
3926 
3927 	/*
3928 	 * Check each expander at this level
3929 	 */
3930 	phyp = pptr;
3931 	while (phyp) {
3932 		pmcs_lock_phy(phyp);
3933 
3934 		if ((phyp->dtype == EXPANDER) && phyp->changed &&
3935 		    !phyp->dead && !phyp->subsidiary &&
3936 		    phyp->configured) {
3937 			pmcs_check_expander(pwp, phyp);
3938 		}
3939 
3940 		pnext = phyp->sibling;
3941 		pmcs_unlock_phy(phyp);
3942 		phyp = pnext;
3943 	}
3944 
3945 	/*
3946 	 * Now check the children
3947 	 */
3948 	phyp = pptr;
3949 	while (phyp) {
3950 		pmcs_lock_phy(phyp);
3951 		pnext = phyp->sibling;
3952 		pchild = phyp->children;
3953 		pmcs_unlock_phy(phyp);
3954 
3955 		if (pchild) {
3956 			pmcs_check_expanders(pwp, pchild);
3957 		}
3958 
3959 		phyp = pnext;
3960 	}
3961 }
3962 
3963 /*
3964  * Called with softstate and PHY locked
3965  */
static void
pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
{
	pmcs_phy_t *ctmp;

	ASSERT(mutex_owned(&pwp->lock));
	ASSERT(mutex_owned(&pptr->phy_lock));
	ASSERT(pptr->level < PMCS_MAX_XPND - 1);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: checking %s", __func__, pptr->path);

	/* Propagate death downward and recurse into child expanders */
	ctmp = pptr->children;
	while (ctmp) {
		/*
		 * If the expander is dead, mark its children dead
		 */
		if (pptr->dead) {
			ctmp->dead = 1;
		}
		if (ctmp->dtype == EXPANDER) {
			pmcs_clear_expander(pwp, ctmp, level + 1);
		}
		ctmp = ctmp->sibling;
	}

	/*
	 * If this expander is not dead, we're done here.
	 */
	if (!pptr->dead) {
		return;
	}

	/*
	 * Now snip out the list of children below us and release them
	 */
	if (pptr->children) {
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	pptr->children = NULL;

	/*
	 * Clear subsidiary phys as well.  Getting the parent's PHY lock
	 * is only necessary if level == 0 since otherwise the parent is
	 * already locked.
	 */
	if (!IS_ROOT_PHY(pptr)) {
		if (level == 0) {
			mutex_enter(&pptr->parent->phy_lock);
		}
		ctmp = pptr->parent->children;
		if (level == 0) {
			mutex_exit(&pptr->parent->phy_lock);
		}
	} else {
		ctmp = pwp->root_phys;
	}

	/* Walk siblings; clear those that are our subsidiary PHYs */
	while (ctmp) {
		if (ctmp == pptr) {
			ctmp = ctmp->sibling;
			continue;
		}
		/*
		 * We only need to lock subsidiary PHYs on the level 0
		 * expander.  Any children of that expander, subsidiaries or
		 * not, will already be locked.
		 */
		if (level == 0) {
			pmcs_lock_phy(ctmp);
		}
		if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
		    memcmp(ctmp->sas_address, pptr->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			/* Not a subsidiary of pptr - skip it */
			if (level == 0) {
				pmcs_unlock_phy(ctmp);
			}
			ctmp = ctmp->sibling;
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: subsidiary %s", __func__, ctmp->path);
		pmcs_clear_phy(pwp, ctmp);
		if (level == 0) {
			pmcs_unlock_phy(ctmp);
		}
		ctmp = ctmp->sibling;
	}

	/* Finally, clear the (dead) expander PHY itself */
	pmcs_clear_phy(pwp, pptr);
}
4058 
4059 /*
4060  * Called with PHY locked and with scratch acquired. We return 0 if
4061  * we fail to allocate resources or notice that the configuration
4062  * count changed while we were running the command. We return
4063  * less than zero if we had an I/O error or received an unsupported
4064  * configuration. Otherwise we return the number of phys in the
4065  * expander.
4066  */
4067 #define	DFM(m, y) if (m == NULL) m = y
static int
pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	char buf[64];
	const uint_t rdoff = 0x100;	/* returned data offset */
	smp_response_frame_t *srf;
	smp_report_general_resp_t *srgr;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
	int result = 0;

	/*
	 * First attempt uses the SAS2-style REPORT GENERAL request (the
	 * low-order bytes carry an allocation length).  If the expander
	 * rejects that form, the low bytes are cleared below and the
	 * request is retried in SAS1.1 form via the "again" label.
	 */
	ival = 0x40001100;

again:
	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		goto out;
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		goto out;
	}
	/* Poison scratch so stale data can't masquerade as a response */
	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
	pwrk->arg = pwp->scratch;
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, NULL,
		    "%s: GET_IQ_ENTRY failed", __func__);
		pmcs_pwork(pwp, pwrk);
		goto out;
	}

	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
	/*
	 * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
	 */
	msg[4] = BE_32(ival);
	msg[5] = 0;
	msg[6] = 0;
	msg[7] = 0;
	msg[8] = 0;
	msg[9] = 0;
	msg[10] = 0;
	msg[11] = 0;
	/* Indirect response: DMA the reply into scratch at offset rdoff */
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/*
	 * Drop the PHY lock across the wait so other completions are not
	 * blocked behind this command.
	 */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	pmcs_pwork(pwp, pwrk);

	/*
	 * If the configuration changed while the PHY lock was dropped,
	 * don't trust this response; restart discovery instead.
	 */
	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		result = 0;
		goto out;
	}
	mutex_exit(&pwp->config_lock);

	/* A non-zero result here means WAIT_FOR timed out */
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		/*
		 * NOTE(review): this message duplicates the one logged in
		 * the success arm of the if/else just below.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		result = 0;
		goto out;
	}
	/* The completion IOMB was copied to scratch; dword 2 is the status */
	ptr = (void *)pwp->scratch;
	status = LE_32(ptr[2]);
	if (status == PMCOUT_STATUS_UNDERFLOW ||
	    status == PMCOUT_STATUS_OVERFLOW) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
		    "%s: over/underflow", __func__);
		status = PMCOUT_STATUS_OK;
	}
	/* SMP response header at rdoff, REPORT GENERAL payload follows it */
	srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
	srgr = (smp_report_general_resp_t *)
	    &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];

	if (status != PMCOUT_STATUS_OK) {
		char *nag = NULL;
		(void) snprintf(buf, sizeof (buf),
		    "%s: SMP op failed (0x%x)", __func__, status);
		/*
		 * The cascaded DFM()s deliberately fall through: the first
		 * (most specific) matching case sets the message and the
		 * rest are no-ops.
		 */
		switch (status) {
		case PMCOUT_STATUS_IO_PORT_IN_RESET:
			DFM(nag, "I/O Port In Reset");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
			DFM(nag, "Hardware Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
			DFM(nag, "Internal SMP Resource Failure");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
			DFM(nag, "PHY Not Ready");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
			DFM(nag, "Connection Rate Not Supported");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
			DFM(nag, "Open Retry Timeout");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
			DFM(nag, "HW Resource Busy");
			/* FALLTHROUGH */
		case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
			DFM(nag, "Response Connection Error");
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: expander %s SMP operation failed (%s)",
			    __func__, pptr->path, nag);
			break;

		/*
		 * For the IO_DS_NON_OPERATIONAL case, we need to kick off
		 * device state recovery and return 0 so that the caller
		 * doesn't assume this expander is dead for good.
		 */
		case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
			pmcs_xscsi_t *xp = pptr->target;

			pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, pptr, xp,
			    "%s: expander %s device state non-operational",
			    __func__, pptr->path);

			if (xp == NULL) {
				/*
				 * Kick off recovery right now.
				 */
				SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
				(void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
				    pwp, DDI_NOSLEEP);
			} else {
				mutex_enter(&xp->statlock);
				pmcs_start_dev_state_recovery(xp, pptr);
				mutex_exit(&xp->statlock);
			}

			break;
		}

		default:
			pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
			result = -EIO;
			break;
		}
	} else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response frame type 0x%x",
		    __func__, srf->srf_frame_type);
		result = -EINVAL;
	} else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response function 0x%x",
		    __func__, srf->srf_function);
		result = -EINVAL;
	} else if (srf->srf_result != 0) {
		/*
		 * Check to see if we have a value of 3 for failure and
		 * whether we were using a SAS2.0 allocation length value
		 * and retry without it.
		 */
		if (srf->srf_result == 3 && (ival & 0xff00)) {
			/* Clear the SAS2 allocation length and retry SAS1.1 */
			ival &= ~0xff00;
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: err 0x%x with SAS2 request- retry with SAS1",
			    __func__, srf->srf_result);
			goto again;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: bad response 0x%x", __func__, srf->srf_result);
		result = -EINVAL;
	} else if (srgr->srgr_configuring) {
		/* Expander is still self-configuring; caller should retry */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: expander at phy %s is still configuring",
		    __func__, pptr->path);
		result = 0;
	} else {
		result = srgr->srgr_number_of_phys;
		/* A SAS2-form request succeeded, so remember that */
		if (ival & 0xff00) {
			pptr->tolerates_sas2 = 1;
		}
		/*
		 * Save off the REPORT_GENERAL response
		 */
		bcopy(srgr, &pptr->rg_resp, sizeof (smp_report_general_resp_t));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s has %d phys and %s SAS2", pptr->path, result,
		    pptr->tolerates_sas2? "tolerates" : "does not tolerate");
	}
out:
	return (result);
}
4292 
4293 /*
4294  * Called with expander locked (and thus, pptr) as well as all PHYs up to
4295  * the root, and scratch acquired. Return 0 if we fail to allocate resources
4296  * or notice that the configuration changed while we were running the command.
4297  *
4298  * We return less than zero if we had an I/O error or received an
4299  * unsupported configuration.
4300  */
4301 static int
4302 pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
4303     pmcs_phy_t *pptr)
4304 {
4305 	struct pmcwork *pwrk;
4306 	char buf[64];
4307 	uint8_t sas_address[8];
4308 	uint8_t att_sas_address[8];
4309 	smp_response_frame_t *srf;
4310 	smp_discover_resp_t *sdr;
4311 	const uint_t rdoff = 0x100;	/* returned data offset */
4312 	uint8_t *roff;
4313 	uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
4314 	int result = 0;
4315 	uint8_t	ini_support;
4316 	uint8_t	tgt_support;
4317 
4318 	if (!expander->iport || !expander->valid_device_id) {
4319 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, expander, expander->target,
4320 		    "%s: Can't reach PHY %s", __func__, expander->path);
4321 		goto out;
4322 	}
4323 
4324 	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
4325 	if (pwrk == NULL) {
4326 		goto out;
4327 	}
4328 	(void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
4329 	pwrk->arg = pwp->scratch;
4330 	pwrk->dtype = expander->dtype;
4331 	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
4332 	msg[1] = LE_32(pwrk->htag);
4333 	msg[2] = LE_32(expander->device_id);
4334 	msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
4335 	    SMP_INDIRECT_RESPONSE);
4336 	/*
4337 	 * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
4338 	 */
4339 	if (expander->tolerates_sas2) {
4340 		msg[4] = BE_32(0x40101B00);
4341 	} else {
4342 		msg[4] = BE_32(0x40100000);
4343 	}
4344 	msg[5] = 0;
4345 	msg[6] = BE_32((pptr->phynum << 16));
4346 	msg[7] = 0;
4347 	msg[8] = 0;
4348 	msg[9] = 0;
4349 	msg[10] = 0;
4350 	msg[11] = 0;
4351 	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
4352 	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
4353 	msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
4354 	msg[15] = 0;
4355 	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4356 	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4357 	if (ptr == NULL) {
4358 		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4359 		goto out;
4360 	}
4361 
4362 	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
4363 
4364 	/* SMP serialization */
4365 	pmcs_smp_acquire(expander->iport);
4366 
4367 	pwrk->state = PMCS_WORK_STATE_ONCHIP;
4368 	htag = pwrk->htag;
4369 	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4370 
4371 	/*
4372 	 * Drop PHY lock while waiting so other completions aren't potentially
4373 	 * blocked.
4374 	 */
4375 	pmcs_unlock_phy(expander);
4376 	WAIT_FOR(pwrk, 1000, result);
4377 	/* Release SMP lock before reacquiring PHY lock */
4378 	pmcs_smp_release(expander->iport);
4379 	pmcs_lock_phy(expander);
4380 
4381 	pmcs_pwork(pwp, pwrk);
4382 
4383 	mutex_enter(&pwp->config_lock);
4384 	if (pwp->config_changed) {
4385 		RESTART_DISCOVERY_LOCKED(pwp);
4386 		mutex_exit(&pwp->config_lock);
4387 		result = 0;
4388 		goto out;
4389 	}
4390 	mutex_exit(&pwp->config_lock);
4391 
4392 	if (result) {
4393 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
4394 		if (pmcs_abort(pwp, expander, htag, 0, 0)) {
4395 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4396 			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
4397 			    __func__, htag);
4398 		} else {
4399 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4400 			    "%s: Issuing SMP ABORT for htag 0x%08x",
4401 			    __func__, htag);
4402 		}
4403 		result = -ETIMEDOUT;
4404 		goto out;
4405 	}
4406 	ptr = (void *)pwp->scratch;
4407 	/*
4408 	 * Point roff to the DMA offset for returned data
4409 	 */
4410 	roff = pwp->scratch;
4411 	roff += rdoff;
4412 	srf = (smp_response_frame_t *)roff;
4413 	sdr = (smp_discover_resp_t *)(roff+4);
4414 	status = LE_32(ptr[2]);
4415 	if (status == PMCOUT_STATUS_UNDERFLOW ||
4416 	    status == PMCOUT_STATUS_OVERFLOW) {
4417 		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
4418 		    "%s: over/underflow", __func__);
4419 		status = PMCOUT_STATUS_OK;
4420 	}
4421 	if (status != PMCOUT_STATUS_OK) {
4422 		char *nag = NULL;
4423 		(void) snprintf(buf, sizeof (buf),
4424 		    "%s: SMP op failed (0x%x)", __func__, status);
4425 		switch (status) {
4426 		case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
4427 			DFM(nag, "Hardware Timeout");
4428 			/* FALLTHROUGH */
4429 		case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
4430 			DFM(nag, "Internal SMP Resource Failure");
4431 			/* FALLTHROUGH */
4432 		case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
4433 			DFM(nag, "PHY Not Ready");
4434 			/* FALLTHROUGH */
4435 		case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
4436 			DFM(nag, "Connection Rate Not Supported");
4437 			/* FALLTHROUGH */
4438 		case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
4439 			DFM(nag, "Open Retry Timeout");
4440 			/* FALLTHROUGH */
4441 		case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
4442 			DFM(nag, "HW Resource Busy");
4443 			/* FALLTHROUGH */
4444 		case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
4445 			DFM(nag, "Response Connection Error");
4446 			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4447 			    "%s: expander %s SMP operation failed (%s)",
4448 			    __func__, pptr->path, nag);
4449 			break;
4450 		default:
4451 			pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
4452 			result = -EIO;
4453 			break;
4454 		}
4455 		goto out;
4456 	} else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
4457 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4458 		    "%s: bad response frame type 0x%x",
4459 		    __func__, srf->srf_frame_type);
4460 		result = -EINVAL;
4461 		goto out;
4462 	} else if (srf->srf_function != SMP_FUNC_DISCOVER) {
4463 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4464 		    "%s: bad response function 0x%x",
4465 		    __func__, srf->srf_function);
4466 		result = -EINVAL;
4467 		goto out;
4468 	} else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) {
4469 		result = pmcs_smp_function_result(pwp, srf);
4470 		/* Need not fail if PHY is Vacant */
4471 		if (result != SMP_RES_PHY_VACANT) {
4472 			result = -EINVAL;
4473 			goto out;
4474 		}
4475 	}
4476 
4477 	/*
4478 	 * Save off the DISCOVER response
4479 	 */
4480 	bcopy(sdr, &pptr->disc_resp, sizeof (smp_discover_resp_t));
4481 
4482 	ini_support = (sdr->sdr_attached_sata_host |
4483 	    (sdr->sdr_attached_smp_initiator << 1) |
4484 	    (sdr->sdr_attached_stp_initiator << 2) |
4485 	    (sdr->sdr_attached_ssp_initiator << 3));
4486 
4487 	tgt_support = (sdr->sdr_attached_sata_device |
4488 	    (sdr->sdr_attached_smp_target << 1) |
4489 	    (sdr->sdr_attached_stp_target << 2) |
4490 	    (sdr->sdr_attached_ssp_target << 3));
4491 
4492 	pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address);
4493 	pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address);
4494 
4495 	/*
4496 	 * Set the routing attribute regardless of the PHY type.
4497 	 */
4498 	pptr->routing_attr = sdr->sdr_routing_attr;
4499 
4500 	switch (sdr->sdr_attached_device_type) {
4501 	case SAS_IF_DTYPE_ENDPOINT:
4502 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4503 		    "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
4504 		    SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x",
4505 		    pptr->path,
4506 		    sdr->sdr_attached_device_type,
4507 		    sdr->sdr_negotiated_logical_link_rate,
4508 		    ini_support,
4509 		    tgt_support,
4510 		    SAS_ADDR_PRT(sas_address),
4511 		    SAS_ADDR_PRT(att_sas_address),
4512 		    sdr->sdr_attached_phy_identifier);
4513 
4514 		if (sdr->sdr_attached_sata_device ||
4515 		    sdr->sdr_attached_stp_target) {
4516 			pptr->dtype = SATA;
4517 		} else if (sdr->sdr_attached_ssp_target) {
4518 			pptr->dtype = SAS;
4519 		} else if (tgt_support || ini_support) {
4520 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4521 			    "%s: %s has tgt support=%x init support=(%x)",
4522 			    __func__, pptr->path, tgt_support, ini_support);
4523 		}
4524 
4525 		switch (pptr->routing_attr) {
4526 		case SMP_ROUTING_SUBTRACTIVE:
4527 		case SMP_ROUTING_TABLE:
4528 		case SMP_ROUTING_DIRECT:
4529 			pptr->routing_method = SMP_ROUTING_DIRECT;
4530 			break;
4531 		default:
4532 			pptr->routing_method = 0xff;	/* Invalid method */
4533 			break;
4534 		}
4535 		pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum),
4536 		    (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE);
4537 		break;
4538 	case SAS_IF_DTYPE_EDGE:
4539 	case SAS_IF_DTYPE_FANOUT:
4540 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4541 		    "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
4542 		    SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x",
4543 		    pptr->path,
4544 		    sdr->sdr_attached_device_type,
4545 		    sdr->sdr_negotiated_logical_link_rate,
4546 		    ini_support,
4547 		    tgt_support,
4548 		    SAS_ADDR_PRT(sas_address),
4549 		    SAS_ADDR_PRT(att_sas_address),
4550 		    sdr->sdr_attached_phy_identifier);
4551 		if (sdr->sdr_attached_smp_target) {
4552 			/*
4553 			 * Avoid configuring phys that just point back
4554 			 * at a parent phy
4555 			 */
4556 			if (expander->parent &&
4557 			    memcmp(expander->parent->sas_address,
4558 			    att_sas_address,
4559 			    sizeof (expander->parent->sas_address)) == 0) {
4560 				pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, NULL,
4561 				    "%s: skipping port back to parent "
4562 				    "expander (%s)", __func__, pptr->path);
4563 				pptr->dtype = NOTHING;
4564 				break;
4565 			}
4566 			pptr->dtype = EXPANDER;
4567 
4568 		} else if (tgt_support || ini_support) {
4569 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4570 			    "%s has tgt support=%x init support=(%x)",
4571 			    pptr->path, tgt_support, ini_support);
4572 			pptr->dtype = EXPANDER;
4573 		}
4574 		if (pptr->routing_attr == SMP_ROUTING_DIRECT) {
4575 			pptr->routing_method = 0xff;	/* Invalid method */
4576 		} else {
4577 			pptr->routing_method = pptr->routing_attr;
4578 		}
4579 		pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum),
4580 		    (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE);
4581 		break;
4582 	default:
4583 		pptr->dtype = NOTHING;
4584 		break;
4585 	}
4586 	if (pptr->dtype != NOTHING) {
4587 		pmcs_phy_t *ctmp;
4588 
4589 		/*
4590 		 * If the attached device is a SATA device and the expander
4591 		 * is (possibly) a SAS2 compliant expander, check for whether
4592 		 * there is a NAA=5 WWN field starting at this offset and
4593 		 * use that for the SAS Address for this device.
4594 		 */
4595 		if (expander->tolerates_sas2 && pptr->dtype == SATA &&
4596 		    (roff[SAS_ATTACHED_NAME_OFFSET] >> 8) == NAA_IEEE_REG) {
4597 			(void) memcpy(pptr->sas_address,
4598 			    &roff[SAS_ATTACHED_NAME_OFFSET], 8);
4599 		} else {
4600 			(void) memcpy(pptr->sas_address, att_sas_address, 8);
4601 		}
4602 		pptr->atdt = (sdr->sdr_attached_device_type);
4603 		/*
4604 		 * Now run up from the expander's parent up to the top to
4605 		 * make sure we only use the least common link_rate.
4606 		 */
4607 		for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) {
4608 			if (ctmp->link_rate <
4609 			    sdr->sdr_negotiated_logical_link_rate) {
4610 				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4611 				    "%s: derating link rate from %x to %x due "
4612 				    "to %s being slower", pptr->path,
4613 				    sdr->sdr_negotiated_logical_link_rate,
4614 				    ctmp->link_rate,
4615 				    ctmp->path);
4616 				sdr->sdr_negotiated_logical_link_rate =
4617 				    ctmp->link_rate;
4618 			}
4619 		}
4620 		pptr->link_rate = sdr->sdr_negotiated_logical_link_rate;
4621 		pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate;
4622 		pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate;
4623 		pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate;
4624 		pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate;
4625 		PHY_CHANGED(pwp, pptr);
4626 	} else {
4627 		pmcs_clear_phy(pwp, pptr);
4628 	}
4629 	result = 1;
4630 out:
4631 	return (result);
4632 }
4633 
4634 /*
4635  * Get a work structure and assign it a tag with type and serial number
4636  * If a structure is returned, it is returned locked.
4637  */
pmcwork_t *
pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp)
{
	pmcwork_t *p;
	uint16_t snum;
	uint32_t off;

	mutex_enter(&pwp->wfree_lock);
	p = STAILQ_FIRST(&pwp->wf);
	if (p == NULL) {
		/*
		 * If we couldn't get a work structure, it's time to bite
		 * the bullet, grab the pfree_lock and copy over all the
		 * work structures from the pending free list to the actual
		 * free list (assuming it's not also empty).
		 *
		 * Lock order here is wfree_lock then pfree_lock; keep it
		 * that way (pmcs_pwork only ever holds one at a time).
		 */
		mutex_enter(&pwp->pfree_lock);
		if (STAILQ_FIRST(&pwp->pf) == NULL) {
			mutex_exit(&pwp->pfree_lock);
			mutex_exit(&pwp->wfree_lock);
			return (NULL);
		}
		/* Move the whole pending list over in O(1) */
		pwp->wf.stqh_first = pwp->pf.stqh_first;
		pwp->wf.stqh_last = pwp->pf.stqh_last;
		STAILQ_INIT(&pwp->pf);
		mutex_exit(&pwp->pfree_lock);

		p = STAILQ_FIRST(&pwp->wf);
		ASSERT(p != NULL);
	}
	STAILQ_REMOVE(&pwp->wf, p, pmcwork, next);
	/* Serial number distinguishes successive uses of the same slot */
	snum = pwp->wserno++;
	mutex_exit(&pwp->wfree_lock);

	/* Index of this structure within the work array */
	off = p - pwp->work;

	mutex_enter(&p->lock);
	ASSERT(p->state == PMCS_WORK_STATE_NIL);
	ASSERT(p->htag == PMCS_TAG_FREE);
	/* Compose htag from the tag type, serial number and array index */
	p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK;
	p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK);
	p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK);
	p->start = gethrtime();
	p->state = PMCS_WORK_STATE_READY;
	p->ssp_event = 0;
	p->dead = 0;

	if (phyp) {
		/* Pin the PHY for the lifetime of this work structure */
		p->phy = phyp;
		pmcs_inc_phy_ref_count(phyp);
	}

	return (p);
}
4692 
4693 /*
4694  * Called with pwrk lock held.  Returned with lock released.
4695  */
void
pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p)
{
	ASSERT(p != NULL);
	ASSERT(mutex_owned(&p->lock));

	/* Snapshot the final state of this use for post-mortem debugging */
	p->last_ptr = p->ptr;
	p->last_arg = p->arg;
	p->last_phy = p->phy;
	p->last_xp = p->xp;
	p->last_htag = p->htag;
	p->last_state = p->state;
	p->finish = gethrtime();

	/* Drop the PHY reference taken in pmcs_gwork() */
	if (p->phy) {
		pmcs_dec_phy_ref_count(p->phy);
	}

	/* Reset the structure to its free state before releasing the lock */
	p->state = PMCS_WORK_STATE_NIL;
	p->htag = PMCS_TAG_FREE;
	p->xp = NULL;
	p->ptr = NULL;
	p->arg = NULL;
	p->phy = NULL;
	p->abt_htag = 0;
	p->timer = 0;
	mutex_exit(&p->lock);

	/*
	 * Return the structure to a free list.  If wfree_lock is
	 * contended, park it on the pending free list instead (this may
	 * be called from interrupt context); pmcs_gwork() merges the
	 * pending list back when the regular free list runs dry.
	 */
	if (mutex_tryenter(&pwp->wfree_lock) == 0) {
		mutex_enter(&pwp->pfree_lock);
		STAILQ_INSERT_TAIL(&pwp->pf, p, next);
		mutex_exit(&pwp->pfree_lock);
	} else {
		STAILQ_INSERT_TAIL(&pwp->wf, p, next);
		mutex_exit(&pwp->wfree_lock);
	}
}
4733 
4734 /*
4735  * Find a work structure based upon a tag and make sure that the tag
4736  * serial number matches the work structure we've found.
4737  * If a structure is found, its lock is held upon return.
4738  */
4739 pmcwork_t *
4740 pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag)
4741 {
4742 	pmcwork_t *p;
4743 	uint32_t idx = PMCS_TAG_INDEX(htag);
4744 
4745 	p = &pwp->work[idx];
4746 
4747 	mutex_enter(&p->lock);
4748 	if (p->htag == htag) {
4749 		return (p);
4750 	}
4751 	mutex_exit(&p->lock);
4752 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
4753 	    "INDEX 0x%x HTAG 0x%x got p->htag 0x%x", idx, htag, p->htag);
4754 	return (NULL);
4755 }
4756 
4757 /*
4758  * Issue an abort for a command or for all commands.
4759  *
4760  * Since this can be called from interrupt context,
4761  * we don't wait for completion if wait is not set.
4762  *
4763  * Called with PHY lock held.
4764  */
int
pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds,
    int wait)
{
	pmcwork_t *pwrk;
	pmcs_xscsi_t *tgt;
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	int result, abt_type;
	uint32_t abt_htag, status;

	/* Only one ABORT ALL may be in flight per PHY at a time */
	if (pptr->abort_all_start) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "%s: ABORT_ALL for "
		    "(%s) already in progress.", __func__, pptr->path);
		return (EBUSY);
	}

	/* Pick the abort opcode matching the device's protocol */
	switch (pptr->dtype) {
	case SAS:
		abt_type = PMCIN_SSP_ABORT;
		break;
	case SATA:
		abt_type = PMCIN_SATA_ABORT;
		break;
	case EXPANDER:
		abt_type = PMCIN_SMP_ABORT;
		break;
	default:
		/* Nothing to abort for other device types */
		return (0);
	}

	pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE,
	    pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->dtype = pptr->dtype;
	if (wait) {
		/* Completion handler copies the outbound IOMB into msg[] */
		pwrk->arg = msg;
	}
	if (pptr->valid_device_id == 0) {
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Invalid DeviceID", __func__);
		return (ENODEV);
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	if (all_cmds) {
		/* Abort-all form: no single tag, scope flag set */
		msg[3] = 0;
		msg[4] = LE_32(1);
		pwrk->ptr = NULL;
		pptr->abort_all_start = gethrtime();
	} else {
		/* Abort a single command identified by its htag */
		msg[3] = LE_32(tag);
		msg[4] = 0;
		pwrk->abt_htag = tag;
	}
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 5);
	if (all_cmds) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting all commands for %s device %s. (htag=0x%x)",
		    __func__, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
		    __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	}
	pwrk->state = PMCS_WORK_STATE_ONCHIP;

	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (!wait) {
		/* Fire and forget: just drop the work structure's lock */
		mutex_exit(&pwrk->lock);
		return (0);
	}

	abt_htag = pwrk->htag;
	/* Drop the PHY lock during the wait so completions can proceed */
	pmcs_unlock_phy(pwrk->phy);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_lock_phy(pwrk->phy);

	tgt = pwrk->xp;
	pmcs_pwork(pwp, pwrk);

	if (tgt != NULL) {
		/* Wait for the target's active queue to drain */
		mutex_enter(&tgt->aqlock);
		if (!STAILQ_EMPTY(&tgt->aq)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort complete (result=0x%x), but "
			    "aq not empty (tgt 0x%p), waiting",
			    __func__, result, (void *)tgt);
			cv_wait(&tgt->abort_cv, &tgt->aqlock);
		}
		mutex_exit(&tgt->aqlock);
	}

	if (all_cmds) {
		/* Allow the next ABORT ALL on this PHY to proceed */
		pptr->abort_all_start = 0;
		cv_signal(&pptr->abort_all_cv);
	}

	/* Non-zero result means the abort request itself timed out */
	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: Abort (htag 0x%08x) request timed out",
		    __func__, abt_htag);
		if (tgt != NULL) {
			mutex_enter(&tgt->statlock);
			if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
			    (tgt->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
				    "%s: Trying DS error recovery for tgt 0x%p",
				    __func__, (void *)tgt);
				(void) pmcs_send_err_recovery_cmd(pwp,
				    PMCS_DEVICE_STATE_IN_RECOVERY, pptr, tgt);
			}
			mutex_exit(&tgt->statlock);
		}
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/*
		 * The only non-success status are IO_NOT_VALID &
		 * IO_ABORT_IN_PROGRESS.
		 * In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's
		 * status is of concern and this duplicate cmd status can
		 * be ignored.
		 * If IO_NOT_VALID, that's not an error per-se.
		 * For abort of single I/O complete the command anyway.
		 * If, however, we were aborting all, that is a problem
		 * as IO_NOT_VALID really means that the IO or device is
		 * not there. So, discovery process will take of the cleanup.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: abort result 0x%x", __func__, LE_32(msg[2]));
		if (all_cmds) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			return (EINVAL);
		}

		return (0);
	}

	if (tgt != NULL) {
		/* Abort succeeded: restore OPERATIONAL if we had been recovering */
		mutex_enter(&tgt->statlock);
		if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
			    __func__, (void *)tgt);
			(void) pmcs_send_err_recovery_cmd(pwp,
			    PMCS_DEVICE_STATE_OPERATIONAL, pptr, tgt);
		}
		mutex_exit(&tgt->statlock);
	}

	return (0);
}
4940 
4941 /*
4942  * Issue a task management function to an SSP device.
4943  *
4944  * Called with PHY lock held.
4945  * statlock CANNOT be held upon entry.
4946  */
4947 int
4948 pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag,
4949     uint64_t lun, uint32_t *response)
4950 {
4951 	int result, ds;
4952 	uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
4953 	sas_ssp_rsp_iu_t *rptr = (void *)local;
4954 	static const uint8_t ssp_rsp_evec[] = {
4955 		0x58, 0x61, 0x56, 0x72, 0x00
4956 	};
4957 	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
4958 	struct pmcwork *pwrk;
4959 	pmcs_xscsi_t *xp;
4960 
4961 	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
4962 	if (pwrk == NULL) {
4963 		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
4964 		return (ENOMEM);
4965 	}
4966 	/*
4967 	 * NB: We use the PMCS_OQ_GENERAL outbound queue
4968 	 * NB: so as to not get entangled in normal I/O
4969 	 * NB: processing.
4970 	 */
4971 	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
4972 	    PMCIN_SSP_INI_TM_START));
4973 	msg[1] = LE_32(pwrk->htag);
4974 	msg[2] = LE_32(pptr->device_id);
4975 	if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) {
4976 		msg[3] = LE_32(tag);
4977 	} else {
4978 		msg[3] = 0;
4979 	}
4980 	msg[4] = LE_32(tmf);
4981 	msg[5] = BE_32((uint32_t)lun);
4982 	msg[6] = BE_32((uint32_t)(lun >> 32));
4983 	msg[7] = LE_32(PMCIN_MESSAGE_REPORT);
4984 
4985 	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4986 	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4987 	if (ptr == NULL) {
4988 		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4989 		pmcs_pwork(pwp, pwrk);
4990 		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
4991 		return (ENOMEM);
4992 	}
4993 	COPY_MESSAGE(ptr, msg, 7);
4994 	pwrk->arg = msg;
4995 	pwrk->dtype = pptr->dtype;
4996 	xp = pptr->target;
4997 	pwrk->xp = xp;
4998 
4999 	if (xp != NULL) {
5000 		mutex_enter(&xp->statlock);
5001 		if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) {
5002 			mutex_exit(&xp->statlock);
5003 			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5004 			pmcs_pwork(pwp, pwrk);
5005 			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: Not "
5006 			    "sending '%s' because DS is '%s'", __func__,
5007 			    pmcs_tmf2str(tmf), pmcs_status_str
5008 			    (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL));
5009 			return (EIO);
5010 		}
5011 		mutex_exit(&xp->statlock);
5012 	}
5013 
5014 	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5015 	    "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__,
5016 	    pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag);
5017 	pwrk->state = PMCS_WORK_STATE_ONCHIP;
5018 	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5019 
5020 	pmcs_unlock_phy(pptr);
5021 	/*
5022 	 * This is a command sent to the target device, so it can take
5023 	 * significant amount of time to complete when path & device is busy.
5024 	 * Set a timeout to 20 seconds
5025 	 */
5026 	WAIT_FOR(pwrk, 20000, result);
5027 	pmcs_lock_phy(pptr);
5028 	pmcs_pwork(pwp, pwrk);
5029 
5030 	if (result) {
5031 		if (xp == NULL) {
5032 			return (ETIMEDOUT);
5033 		}
5034 
5035 		mutex_enter(&xp->statlock);
5036 		pmcs_start_dev_state_recovery(xp, pptr);
5037 		mutex_exit(&xp->statlock);
5038 		return (ETIMEDOUT);
5039 	}
5040 
5041 	status = LE_32(msg[2]);
5042 	if (status != PMCOUT_STATUS_OK) {
5043 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5044 		    "%s: status %s for TMF %s action to %s, lun %llu",
5045 		    __func__, pmcs_status_str(status),  pmcs_tmf2str(tmf),
5046 		    pptr->path, (unsigned long long) lun);
5047 		if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
5048 		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
5049 		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
5050 			ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
5051 		} else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) {
5052 			/*
5053 			 * If the status is IN_RECOVERY, it's an indication
5054 			 * that it's now time for us to request to have the
5055 			 * device state set to OPERATIONAL since we're the ones
5056 			 * that requested recovery to begin with.
5057 			 */
5058 			ds = PMCS_DEVICE_STATE_OPERATIONAL;
5059 		} else {
5060 			ds = PMCS_DEVICE_STATE_IN_RECOVERY;
5061 		}
5062 		if (xp != NULL) {
5063 			mutex_enter(&xp->statlock);
5064 			if (xp->dev_state != ds) {
5065 				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5066 				    "%s: Sending err recovery cmd"
5067 				    " for tgt 0x%p (status = %s)",
5068 				    __func__, (void *)xp,
5069 				    pmcs_status_str(status));
5070 				(void) pmcs_send_err_recovery_cmd(pwp, ds,
5071 				    pptr, xp);
5072 			}
5073 			mutex_exit(&xp->statlock);
5074 		}
5075 		return (EIO);
5076 	} else {
5077 		ds = PMCS_DEVICE_STATE_OPERATIONAL;
5078 		if (xp != NULL) {
5079 			mutex_enter(&xp->statlock);
5080 			if (xp->dev_state != ds) {
5081 				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5082 				    "%s: Sending err recovery cmd"
5083 				    " for tgt 0x%p (status = %s)",
5084 				    __func__, (void *)xp,
5085 				    pmcs_status_str(status));
5086 				(void) pmcs_send_err_recovery_cmd(pwp, ds,
5087 				    pptr, xp);
5088 			}
5089 			mutex_exit(&xp->statlock);
5090 		}
5091 	}
5092 	if (LE_32(msg[3]) == 0) {
5093 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5094 		    "TMF completed with no response");
5095 		return (EIO);
5096 	}
5097 	pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec);
5098 	xd = (uint8_t *)(&msg[5]);
5099 	xd += SAS_RSP_HDR_SIZE;
5100 	if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) {
5101 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5102 		    "%s: TMF response not RESPONSE DATA (0x%x)",
5103 		    __func__, rptr->datapres);
5104 		return (EIO);
5105 	}
5106 	if (rptr->response_data_length != 4) {
5107 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
5108 		    "Bad SAS RESPONSE DATA LENGTH", msg);
5109 		return (EIO);
5110 	}
5111 	(void) memcpy(&status, xd, sizeof (uint32_t));
5112 	status = BE_32(status);
5113 	if (response != NULL)
5114 		*response = status;
5115 	/*
5116 	 * The status is actually in the low-order byte.  The upper three
5117 	 * bytes contain additional information for the TMFs that support them.
5118 	 * However, at this time we do not issue any of those.  In the other
5119 	 * cases, the upper three bytes are supposed to be 0, but it appears
5120 	 * they aren't always.  Just mask them off.
5121 	 */
5122 	switch (status & 0xff) {
5123 	case SAS_RSP_TMF_COMPLETE:
5124 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5125 		    "%s: TMF complete", __func__);
5126 		result = 0;
5127 		break;
5128 	case SAS_RSP_TMF_SUCCEEDED:
5129 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5130 		    "%s: TMF succeeded", __func__);
5131 		result = 0;
5132 		break;
5133 	case SAS_RSP_INVALID_FRAME:
5134 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5135 		    "%s: TMF returned INVALID FRAME", __func__);
5136 		result = EIO;
5137 		break;
5138 	case SAS_RSP_TMF_NOT_SUPPORTED:
5139 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5140 		    "%s: TMF returned TMF NOT SUPPORTED", __func__);
5141 		result = EIO;
5142 		break;
5143 	case SAS_RSP_TMF_FAILED:
5144 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5145 		    "%s: TMF returned TMF FAILED", __func__);
5146 		result = EIO;
5147 		break;
5148 	case SAS_RSP_TMF_INCORRECT_LUN:
5149 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5150 		    "%s: TMF returned INCORRECT LUN", __func__);
5151 		result = EIO;
5152 		break;
5153 	case SAS_RSP_OVERLAPPED_OIPTTA:
5154 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5155 		    "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG "
5156 		    "ATTEMPTED", __func__);
5157 		result = EIO;
5158 		break;
5159 	default:
5160 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
5161 		    "%s: TMF returned unknown code 0x%x", __func__, status);
5162 		result = EIO;
5163 		break;
5164 	}
5165 	return (result);
5166 }
5167 
5168 /*
5169  * Called with PHY lock held and scratch acquired
5170  */
5171 int
5172 pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
5173 {
5174 	const char *utag_fail_fmt = "%s: untagged NCQ command failure";
5175 	const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)";
5176 	uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status;
5177 	uint8_t *fp = pwp->scratch, ds;
5178 	fis_t fis;
5179 	pmcwork_t *pwrk;
5180 	pmcs_xscsi_t *tgt;
5181 
5182 	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
5183 	if (pwrk == NULL) {
5184 		return (ENOMEM);
5185 	}
5186 	msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE,
5187 	    PMCIN_SATA_HOST_IO_START));
5188 	msg[1] = LE_32(pwrk->htag);
5189 	msg[2] = LE_32(pptr->device_id);
5190 	msg[3] = LE_32(512);
5191 	msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI);
5192 	msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV);
5193 	msg[6] = LE_32(0x10);
5194 	msg[8] = LE_32(1);
5195 	msg[9] = 0;
5196 	msg[10] = 0;
5197 	msg[11] = 0;
5198 	msg[12] = LE_32(DWORD0(pwp->scratch_dma));
5199 	msg[13] = LE_32(DWORD1(pwp->scratch_dma));
5200 	msg[14] = LE_32(512);
5201 	msg[15] = 0;
5202 
5203 	pwrk->arg = msg;
5204 	pwrk->dtype = pptr->dtype;
5205 
5206 	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5207 	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5208 	if (ptr == NULL) {
5209 		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5210 		pmcs_pwork(pwp, pwrk);
5211 		return (ENOMEM);
5212 	}
5213 	COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE);
5214 	pwrk->state = PMCS_WORK_STATE_ONCHIP;
5215 	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5216 
5217 	pmcs_unlock_phy(pptr);
5218 	WAIT_FOR(pwrk, 250, result);
5219 	pmcs_lock_phy(pptr);
5220 	pmcs_pwork(pwp, pwrk);
5221 
5222 	tgt = pptr->target;
5223 	if (result) {
5224 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, pmcs_timeo, __func__);
5225 		return (EIO);
5226 	}
5227 	status = LE_32(msg[2]);
5228 	if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) {
5229 		if (tgt == NULL) {
5230 			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
5231 			    "%s: cannot find target for phy 0x%p for "
5232 			    "dev state recovery", __func__, (void *)pptr);
5233 			return (EIO);
5234 		}
5235 
5236 		mutex_enter(&tgt->statlock);
5237 
5238 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg);
5239 		if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
5240 		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
5241 		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
5242 			ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
5243 		} else {
5244 			ds = PMCS_DEVICE_STATE_IN_RECOVERY;
5245 		}
5246 		if (tgt->dev_state != ds) {
5247 			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, "%s: Trying "
5248 			    "SATA DS Recovery for tgt(0x%p) for status(%s)",
5249 			    __func__, (void *)tgt, pmcs_status_str(status));
5250 			(void) pmcs_send_err_recovery_cmd(pwp, ds, pptr, tgt);
5251 		}
5252 
5253 		mutex_exit(&tgt->statlock);
5254 		return (EIO);
5255 	}
5256 	fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H;
5257 	fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5];
5258 	fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9];
5259 	fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13];
5260 	fis[4] = 0;
5261 	if (fp[0] & 0x80) {
5262 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
5263 		    utag_fail_fmt, __func__);
5264 	} else {
5265 		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
5266 		    tag_fail_fmt, __func__, fp[0] & 0x1f);
5267 	}
5268 	pmcs_fis_dump(pwp, fis);
5269 	pptr->need_rl_ext = 0;
5270 	return (0);
5271 }
5272 
5273 /*
5274  * Transform a structure from CPU to Device endian format, or
5275  * vice versa, based upon a transformation vector.
5276  *
5277  * A transformation vector is an array of bytes, each byte
5278  * of which is defined thusly:
5279  *
5280  *  bit 7: from CPU to desired endian, otherwise from desired endian
5281  *	   to CPU format
5282  *  bit 6: Big Endian, else Little Endian
5283  *  bits 5-4:
5284  *       00 Undefined
5285  *       01 One Byte quantities
5286  *       02 Two Byte quantities
5287  *       03 Four Byte quantities
5288  *
5289  *  bits 3-0:
5290  *       00 Undefined
5291  *       Number of quantities to transform
5292  *
5293  * The vector is terminated by a 0 value.
5294  */
5295 
5296 void
5297 pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in,
5298     const uint8_t *xfvec)
5299 {
5300 	uint8_t c, *out = orig_out, *in = orig_in;
5301 
5302 	if (xfvec == NULL) {
5303 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
5304 		    "%s: null xfvec", __func__);
5305 		return;
5306 	}
5307 	if (out == NULL) {
5308 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
5309 		    "%s: null out", __func__);
5310 		return;
5311 	}
5312 	if (in == NULL) {
5313 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
5314 		    "%s: null in", __func__);
5315 		return;
5316 	}
5317 	while ((c = *xfvec++) != 0) {
5318 		int nbyt = (c & 0xf);
5319 		int size = (c >> 4) & 0x3;
5320 		int bige = (c >> 4) & 0x4;
5321 
5322 		switch (size) {
5323 		case 1:
5324 		{
5325 			while (nbyt-- > 0) {
5326 				*out++ = *in++;
5327 			}
5328 			break;
5329 		}
5330 		case 2:
5331 		{
5332 			uint16_t tmp;
5333 			while (nbyt-- > 0) {
5334 				(void) memcpy(&tmp, in, sizeof (uint16_t));
5335 				if (bige) {
5336 					tmp = BE_16(tmp);
5337 				} else {
5338 					tmp = LE_16(tmp);
5339 				}
5340 				(void) memcpy(out, &tmp, sizeof (uint16_t));
5341 				out += sizeof (uint16_t);
5342 				in += sizeof (uint16_t);
5343 			}
5344 			break;
5345 		}
5346 		case 3:
5347 		{
5348 			uint32_t tmp;
5349 			while (nbyt-- > 0) {
5350 				(void) memcpy(&tmp, in, sizeof (uint32_t));
5351 				if (bige) {
5352 					tmp = BE_32(tmp);
5353 				} else {
5354 					tmp = LE_32(tmp);
5355 				}
5356 				(void) memcpy(out, &tmp, sizeof (uint32_t));
5357 				out += sizeof (uint32_t);
5358 				in += sizeof (uint32_t);
5359 			}
5360 			break;
5361 		}
5362 		default:
5363 			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
5364 			    "%s: bad size", __func__);
5365 			return;
5366 		}
5367 	}
5368 }
5369 
5370 const char *
5371 pmcs_get_rate(unsigned int linkrt)
5372 {
5373 	const char *rate;
5374 	switch (linkrt) {
5375 	case SAS_LINK_RATE_1_5GBIT:
5376 		rate = "1.5";
5377 		break;
5378 	case SAS_LINK_RATE_3GBIT:
5379 		rate = "3.0";
5380 		break;
5381 	case SAS_LINK_RATE_6GBIT:
5382 		rate = "6.0";
5383 		break;
5384 	default:
5385 		rate = "???";
5386 		break;
5387 	}
5388 	return (rate);
5389 }
5390 
5391 const char *
5392 pmcs_get_typename(pmcs_dtype_t type)
5393 {
5394 	switch (type) {
5395 	case NOTHING:
5396 		return ("NIL");
5397 	case SATA:
5398 		return ("SATA");
5399 	case SAS:
5400 		return ("SSP");
5401 	case EXPANDER:
5402 		return ("EXPANDER");
5403 	}
5404 	return ("????");
5405 }
5406 
5407 const char *
5408 pmcs_tmf2str(int tmf)
5409 {
5410 	switch (tmf) {
5411 	case SAS_ABORT_TASK:
5412 		return ("Abort Task");
5413 	case SAS_ABORT_TASK_SET:
5414 		return ("Abort Task Set");
5415 	case SAS_CLEAR_TASK_SET:
5416 		return ("Clear Task Set");
5417 	case SAS_LOGICAL_UNIT_RESET:
5418 		return ("Logical Unit Reset");
5419 	case SAS_I_T_NEXUS_RESET:
5420 		return ("I_T Nexus Reset");
5421 	case SAS_CLEAR_ACA:
5422 		return ("Clear ACA");
5423 	case SAS_QUERY_TASK:
5424 		return ("Query Task");
5425 	case SAS_QUERY_TASK_SET:
5426 		return ("Query Task Set");
5427 	case SAS_QUERY_UNIT_ATTENTION:
5428 		return ("Query Unit Attention");
5429 	default:
5430 		return ("Unknown");
5431 	}
5432 }
5433 
/*
 * Return a printable name for a PMCOUT_STATUS_* completion code.
 *
 * NOTE: returns NULL for unrecognized codes — callers must not pass
 * the result straight to a %s conversion without checking.
 */
const char *
pmcs_status_str(uint32_t status)
{
	switch (status) {
	case PMCOUT_STATUS_OK:
		return ("OK");
	case PMCOUT_STATUS_ABORTED:
		return ("ABORTED");
	case PMCOUT_STATUS_OVERFLOW:
		return ("OVERFLOW");
	case PMCOUT_STATUS_UNDERFLOW:
		return ("UNDERFLOW");
	case PMCOUT_STATUS_FAILED:
		return ("FAILED");
	case PMCOUT_STATUS_ABORT_RESET:
		return ("ABORT_RESET");
	case PMCOUT_STATUS_IO_NOT_VALID:
		return ("IO_NOT_VALID");
	case PMCOUT_STATUS_NO_DEVICE:
		return ("NO_DEVICE");
	case PMCOUT_STATUS_ILLEGAL_PARAMETER:
		return ("ILLEGAL_PARAMETER");
	case PMCOUT_STATUS_LINK_FAILURE:
		return ("LINK_FAILURE");
	case PMCOUT_STATUS_PROG_ERROR:
		return ("PROG_ERROR");
	case PMCOUT_STATUS_EDC_IN_ERROR:
		return ("EDC_IN_ERROR");
	case PMCOUT_STATUS_EDC_OUT_ERROR:
		return ("EDC_OUT_ERROR");
	case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
		return ("ERROR_HW_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERR_BREAK:
		return ("XFER_ERR_BREAK");
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		return ("XFER_ERR_PHY_NOT_READY");
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
		return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
		return ("OPEN_CNX_ERROR_ZONE_VIOLATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
		return ("OPEN_CNX_ERROR_BREAK");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS");
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
		return ("OPENCNX_ERROR_BAD_DESTINATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		return ("OPEN_CNX_ERROR_WRONG_DESTINATION");
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		return ("OPEN_CNX_ERROR_UNKNOWN_ERROR");
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		return ("IO_XFER_ERROR_NAK_RECEIVED");
	case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT:
		return ("XFER_ERROR_ACK_NAK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
		return ("XFER_ERROR_PEER_ABORTED");
	case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
		return ("XFER_ERROR_RX_FRAME");
	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
		return ("IO_XFER_ERROR_DMA");
	case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT:
		return ("XFER_ERROR_CREDIT_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		return ("XFER_ERROR_SATA_LINK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_SATA:
		return ("XFER_ERROR_SATA");
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		return ("XFER_ERROR_REJECTED_NCQ_MODE");
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		return ("XFER_ERROR_ABORTED_DUE_TO_SRST");
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		return ("XFER_ERROR_ABORTED_NCQ_MODE");
	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		return ("IO_XFER_OPEN_RETRY_TIMEOUT");
	case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
		return ("SMP_RESP_CONNECTION_ERROR");
	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
		return ("XFER_ERROR_UNEXPECTED_PHASE");
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
		return ("XFER_ERROR_RDY_OVERRUN");
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
		return ("XFER_ERROR_RDY_NOT_EXPECTED");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
		return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
		return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK");
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK");
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
		return ("XFER_ERROR_OFFSET_MISMATCH");
	case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN:
		return ("XFER_ERROR_ZERO_DATA_LEN");
	case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED:
		return ("XFER_CMD_FRAME_ISSUED");
	case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
		return ("ERROR_INTERNAL_SMP_RESOURCE");
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
		return ("IO_PORT_IN_RESET");
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
		return ("DEVICE STATE NON-OPERATIONAL");
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		return ("DEVICE STATE IN RECOVERY");
	case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		return ("OPEN CNX ERR HW RESOURCE BUSY");
	default:
		return (NULL);
	}
}
5546 
/*
 * Convert an 8-byte big-endian SAS address into a 64-bit WWN.
 */
uint64_t
pmcs_barray2wwn(uint8_t ba[8])
{
	uint64_t wwn = 0;
	int idx;

	for (idx = 0; idx < 8; idx++) {
		wwn = (wwn << 8) | ba[idx];
	}
	return (wwn);
}
5559 
/*
 * Convert a 64-bit WWN into an 8-byte big-endian SAS address.
 */
void
pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8])
{
	int idx;

	for (idx = 7; idx >= 0; idx--) {
		ba[idx] = (uint8_t)(wwn & 0xff);
		wwn >>= 8;
	}
}
5569 
5570 void
5571 pmcs_report_fwversion(pmcs_hw_t *pwp)
5572 {
5573 	const char *fwsupport;
5574 	switch (PMCS_FW_TYPE(pwp)) {
5575 	case PMCS_FW_TYPE_RELEASED:
5576 		fwsupport = "Released";
5577 		break;
5578 	case PMCS_FW_TYPE_DEVELOPMENT:
5579 		fwsupport = "Development";
5580 		break;
5581 	case PMCS_FW_TYPE_ALPHA:
5582 		fwsupport = "Alpha";
5583 		break;
5584 	case PMCS_FW_TYPE_BETA:
5585 		fwsupport = "Beta";
5586 		break;
5587 	default:
5588 		fwsupport = "Special";
5589 		break;
5590 	}
5591 	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
5592 	    "Chip Revision: %c; F/W Revision %x.%x.%x %s", 'A' + pwp->chiprev,
5593 	    PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
5594 	    fwsupport);
5595 }
5596 
5597 void
5598 pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen)
5599 {
5600 	if (pptr->parent) {
5601 		pmcs_phy_name(pwp, pptr->parent, obuf, olen);
5602 		(void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum);
5603 	} else {
5604 		(void) snprintf(obuf, olen, "pp%02x", pptr->phynum);
5605 	}
5606 }
5607 
5608 /*
5609  * Implementation for pmcs_find_phy_by_devid.
5610  * If the PHY is found, it is returned locked.
5611  */
static pmcs_phy_t *
pmcs_find_phy_by_devid_impl(pmcs_phy_t *phyp, uint32_t device_id)
{
	pmcs_phy_t *match, *cphyp, *nphyp;

	/* The caller must not hold the starting PHY's lock; we take it. */
	ASSERT(!mutex_owned(&phyp->phy_lock));

	while (phyp) {
		pmcs_lock_phy(phyp);

		/* On a match, return with the PHY still locked. */
		if ((phyp->valid_device_id) && (phyp->device_id == device_id)) {
			return (phyp);
		}
		if (phyp->children) {
			cphyp = phyp->children;
			/*
			 * Drop our lock before descending so the recursive
			 * call can take child locks without holding ours
			 * (lock-ordering discipline).
			 */
			pmcs_unlock_phy(phyp);
			match = pmcs_find_phy_by_devid_impl(cphyp, device_id);
			if (match) {
				ASSERT(mutex_owned(&match->phy_lock));
				return (match);
			}
			pmcs_lock_phy(phyp);
		}

		/* Root PHY siblings are iterated by our caller, not here. */
		if (IS_ROOT_PHY(phyp)) {
			pmcs_unlock_phy(phyp);
			phyp = NULL;
		} else {
			/* Capture the sibling pointer before unlocking. */
			nphyp = phyp->sibling;
			pmcs_unlock_phy(phyp);
			phyp = nphyp;
		}
	}

	return (NULL);
}
5648 
5649 /*
5650  * If the PHY is found, it is returned locked
5651  */
5652 pmcs_phy_t *
5653 pmcs_find_phy_by_devid(pmcs_hw_t *pwp, uint32_t device_id)
5654 {
5655 	pmcs_phy_t *phyp, *match = NULL;
5656 
5657 	phyp = pwp->root_phys;
5658 
5659 	while (phyp) {
5660 		match = pmcs_find_phy_by_devid_impl(phyp, device_id);
5661 		if (match) {
5662 			ASSERT(mutex_owned(&match->phy_lock));
5663 			return (match);
5664 		}
5665 		phyp = phyp->sibling;
5666 	}
5667 
5668 	return (NULL);
5669 }
5670 
5671 /*
5672  * This function is called as a sanity check to ensure that a newly registered
5673  * PHY doesn't have a device_id that exists with another registered PHY.
5674  */
static boolean_t
pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id)
{
	pmcs_phy_t *pptr, *pchild;
	boolean_t rval;

	pptr = parent;

	/* Walk this sibling list, recursing into children. */
	while (pptr) {
		/* A different PHY already registered with this device_id? */
		if (pptr->valid_device_id && (pptr != phyp) &&
		    (pptr->device_id == device_id)) {
			/*
			 * This can still be OK if both of these PHYs actually
			 * represent the same device (e.g. expander).  It could
			 * be a case of a new "primary" PHY.  If the SAS address
			 * is the same and they have the same parent, we'll
			 * accept this if the PHY to be registered is the
			 * primary.
			 */
			if ((phyp->parent == pptr->parent) &&
			    (memcmp(phyp->sas_address,
			    pptr->sas_address, 8) == 0) && (phyp->width > 1)) {
				/*
				 * Move children over to the new primary and
				 * update both PHYs
				 */
				pmcs_lock_phy(pptr);
				phyp->children = pptr->children;
				pchild = phyp->children;
				/* Re-parent every child onto the new primary */
				while (pchild) {
					pchild->parent = phyp;
					pchild = pchild->sibling;
				}
				phyp->subsidiary = 0;
				phyp->ncphy = pptr->ncphy;
				/*
				 * device_id, valid_device_id, and configured
				 * will be set by the caller
				 */
				pptr->children = NULL;
				pptr->subsidiary = 1;
				pptr->ncphy = 0;
				pmcs_unlock_phy(pptr);
				pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
				    "%s: Moving device_id %d from PHY %s to %s",
				    __func__, device_id, pptr->path,
				    phyp->path);
				return (B_TRUE);
			}
			/* Genuine duplicate: reject the registration. */
			pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: phy %s already exists as %s with "
			    "device id 0x%x", __func__, phyp->path,
			    pptr->path, device_id);
			return (B_FALSE);
		}

		/* Check the subtree below this PHY as well. */
		if (pptr->children) {
			rval = pmcs_validate_devid(pptr->children, phyp,
			    device_id);
			if (rval == B_FALSE) {
				return (rval);
			}
		}

		pptr = pptr->sibling;
	}

	/* This PHY and device_id are valid */
	return (B_TRUE);
}
5745 
5746 /*
5747  * If the PHY is found, it is returned locked
5748  */
static pmcs_phy_t *
pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn)
{
	pmcs_phy_t *matched_phy, *cphyp, *nphyp;

	/* The caller must not hold the starting PHY's lock; we take it. */
	ASSERT(!mutex_owned(&phyp->phy_lock));

	while (phyp) {
		pmcs_lock_phy(phyp);

		/*
		 * Compare the 8-byte big-endian SAS address; on a match,
		 * return with the PHY still locked.
		 */
		if (phyp->valid_device_id) {
			if (memcmp(phyp->sas_address, wwn, 8) == 0) {
				return (phyp);
			}
		}

		if (phyp->children) {
			cphyp = phyp->children;
			/* Drop our lock before recursing into the children. */
			pmcs_unlock_phy(phyp);
			matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn);
			if (matched_phy) {
				ASSERT(mutex_owned(&matched_phy->phy_lock));
				return (matched_phy);
			}
			pmcs_lock_phy(phyp);
		}

		/*
		 * Only iterate through non-root PHYs
		 */
		if (IS_ROOT_PHY(phyp)) {
			pmcs_unlock_phy(phyp);
			phyp = NULL;
		} else {
			/* Capture the sibling pointer before unlocking. */
			nphyp = phyp->sibling;
			pmcs_unlock_phy(phyp);
			phyp = nphyp;
		}
	}

	return (NULL);
}
5791 
5792 pmcs_phy_t *
5793 pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
5794 {
5795 	uint8_t ebstr[8];
5796 	pmcs_phy_t *pptr, *matched_phy;
5797 
5798 	pmcs_wwn2barray(wwn, ebstr);
5799 
5800 	pptr = pwp->root_phys;
5801 	while (pptr) {
5802 		matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr);
5803 		if (matched_phy) {
5804 			ASSERT(mutex_owned(&matched_phy->phy_lock));
5805 			return (matched_phy);
5806 		}
5807 
5808 		pptr = pptr->sibling;
5809 	}
5810 
5811 	return (NULL);
5812 }
5813 
5814 
5815 /*
5816  * pmcs_find_phy_by_sas_address
5817  *
5818  * Find a PHY that both matches "sas_addr" and is on "iport".
5819  * If a matching PHY is found, it is returned locked.
5820  */
pmcs_phy_t *
pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport,
    pmcs_phy_t *root, char *sas_addr)
{
	int ua_form = 1;
	uint64_t wwn;
	char addr[PMCS_MAX_UA_SIZE];
	pmcs_phy_t *pptr, *pnext, *pchild;

	/* NULL root means start from the top of the PHY tree. */
	if (root == NULL) {
		pptr = pwp->root_phys;
	} else {
		pptr = root;
	}

	while (pptr) {
		pmcs_lock_phy(pptr);
		/*
		 * If the PHY is dead or does not have a valid device ID,
		 * skip it.
		 */
		if ((pptr->dead) || (!pptr->valid_device_id)) {
			goto next_phy;
		}

		/* Only consider PHYs that belong to the requested iport. */
		if (pptr->iport != iport) {
			goto next_phy;
		}

		/*
		 * Compare unit-address strings; on a match, return with
		 * the PHY still locked.
		 */
		wwn = pmcs_barray2wwn(pptr->sas_address);
		(void *) scsi_wwn_to_wwnstr(wwn, ua_form, addr);
		if (strncmp(addr, sas_addr, strlen(addr)) == 0) {
			return (pptr);
		}

		if (pptr->children) {
			pchild = pptr->children;
			/* Drop our lock before recursing into the children. */
			pmcs_unlock_phy(pptr);
			pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild,
			    sas_addr);
			if (pnext) {
				return (pnext);
			}
			pmcs_lock_phy(pptr);
		}

next_phy:
		/* Capture the sibling pointer before unlocking. */
		pnext = pptr->sibling;
		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (NULL);
}
5875 
5876 void
5877 pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis)
5878 {
5879 	switch (fis[0] & 0xff) {
5880 	case FIS_REG_H2DEV:
5881 		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
5882 		    "FIS REGISTER HOST TO DEVICE: "
5883 		    "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x "
5884 		    "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 |
5885 		    BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]),
5886 		    (unsigned long long)
5887 		    (((uint64_t)fis[2] & 0x00ffffff) << 24 |
5888 		    ((uint64_t)fis[1] & 0x00ffffff)));
5889 		break;
5890 	case FIS_REG_D2H:
5891 		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
5892 		    "FIS REGISTER DEVICE TO HOST: Status=0x%02x "
5893 		    "Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu",
5894 		    BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]),
5895 		    (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 |
5896 		    ((uint64_t)fis[1] & 0x00ffffff)));
5897 		break;
5898 	default:
5899 		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
5900 		    "FIS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
5901 		    fis[0], fis[1], fis[2], fis[3], fis[4]);
5902 		break;
5903 	}
5904 }
5905 
5906 void
5907 pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg)
5908 {
5909 	uint32_t *mb = arg;
5910 	size_t i;
5911 
5912 	pmcs_prt(pwp, level, NULL, NULL, msg);
5913 	for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i += 4) {
5914 		pmcs_prt(pwp, level, NULL, NULL,
5915 		    "Offset %2lu: 0x%08x 0x%08x 0x%08x 0x%08x",
5916 		    i * sizeof (uint32_t), LE_32(mb[i]),
5917 		    LE_32(mb[i+1]), LE_32(mb[i+2]), LE_32(mb[i+3]));
5918 	}
5919 }
5920 
5921 /*
5922  * If phyp == NULL we're being called from the worker thread, in which
5923  * case we need to check all the PHYs.  In this case, the softstate lock
5924  * will be held.
5925  * If phyp is non-NULL, just issue the spinup release for the specified PHY
5926  * (which will already be locked).
5927  */
5928 void
5929 pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
5930 {
5931 	uint32_t *msg;
5932 	struct pmcwork *pwrk;
5933 	pmcs_phy_t *tphyp;
5934 
5935 	if (phyp != NULL) {
5936 		ASSERT(mutex_owned(&phyp->phy_lock));
5937 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL,
5938 		    "%s: Issuing spinup release only for PHY %s", __func__,
5939 		    phyp->path);
5940 		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5941 		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5942 		if (msg == NULL || (pwrk =
5943 		    pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
5944 			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5945 			SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE);
5946 			return;
5947 		}
5948 
5949 		phyp->spinup_hold = 0;
5950 		bzero(msg, PMCS_QENTRY_SIZE);
5951 		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
5952 		    PMCIN_LOCAL_PHY_CONTROL));
5953 		msg[1] = LE_32(pwrk->htag);
5954 		msg[2] = LE_32((0x10 << 8) | phyp->phynum);
5955 
5956 		pwrk->dtype = phyp->dtype;
5957 		pwrk->state = PMCS_WORK_STATE_ONCHIP;
5958 		mutex_exit(&pwrk->lock);
5959 		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5960 		return;
5961 	}
5962 
5963 	ASSERT(mutex_owned(&pwp->lock));
5964 
5965 	tphyp = pwp->root_phys;
5966 	while (tphyp) {
5967 		pmcs_lock_phy(tphyp);
5968 		if (tphyp->spinup_hold == 0) {
5969 			pmcs_unlock_phy(tphyp);
5970 			tphyp = tphyp->sibling;
5971 			continue;
5972 		}
5973 
5974 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL,
5975 		    "%s: Issuing spinup release for PHY %s", __func__,
5976 		    phyp->path);
5977 
5978 		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5979 		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5980 		if (msg == NULL || (pwrk =
5981 		    pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
5982 			pmcs_unlock_phy(tphyp);
5983 			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5984 			SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE);
5985 			break;
5986 		}
5987 
5988 		tphyp->spinup_hold = 0;
5989 		bzero(msg, PMCS_QENTRY_SIZE);
5990 		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
5991 		    PMCIN_LOCAL_PHY_CONTROL));
5992 		msg[1] = LE_32(pwrk->htag);
5993 		msg[2] = LE_32((0x10 << 8) | tphyp->phynum);
5994 
5995 		pwrk->dtype = phyp->dtype;
5996 		pwrk->state = PMCS_WORK_STATE_ONCHIP;
5997 		mutex_exit(&pwrk->lock);
5998 		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5999 		pmcs_unlock_phy(tphyp);
6000 
6001 		tphyp = tphyp->sibling;
6002 	}
6003 }
6004 
6005 /*
6006  * Abort commands on dead PHYs and deregister them as well as removing
6007  * the associated targets.
6008  */
static int
pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	pmcs_phy_t *pnext, *pchild;
	boolean_t remove_device;
	int rval = 0;

	/* Depth-first walk of the PHY tree, children before the PHY itself. */
	while (phyp) {
		/* Snapshot child/sibling pointers under the PHY lock. */
		pmcs_lock_phy(phyp);
		pchild = phyp->children;
		pnext = phyp->sibling;
		pmcs_unlock_phy(phyp);

		if (pchild) {
			rval = pmcs_kill_devices(pwp, pchild);
			if (rval) {
				return (rval);
			}
		}

		/*
		 * pmcs_remove_device requires the softstate lock.
		 */
		mutex_enter(&pwp->lock);
		pmcs_lock_phy(phyp);
		/* Only dead PHYs with a registered device need removal. */
		if (phyp->dead && phyp->valid_device_id) {
			remove_device = B_TRUE;
		} else {
			remove_device = B_FALSE;
		}

		if (remove_device) {
			pmcs_remove_device(pwp, phyp);
			/* Drop the softstate lock before kill_device waits. */
			mutex_exit(&pwp->lock);

			rval = pmcs_kill_device(pwp, phyp);

			if (rval) {
				pmcs_unlock_phy(phyp);
				return (rval);
			}
		} else {
			mutex_exit(&pwp->lock);
		}

		pmcs_unlock_phy(phyp);
		phyp = pnext;
	}

	return (rval);
}
6060 
6061 /*
6062  * Called with PHY locked
6063  */
int
pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int r, result;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	struct pmcwork *pwrk;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "kill %s device @ %s",
	    pmcs_get_typename(pptr->dtype), pptr->path);

	/*
	 * There may be an outstanding ABORT_ALL running, which we wouldn't
	 * know just by checking abort_pending.  We can, however, check
	 * abort_all_start.  If it's non-zero, there is one, and we'll just
	 * sit here and wait for it to complete.  If we don't, we'll remove
	 * the device while there are still commands pending.
	 */
	if (pptr->abort_all_start) {
		while (pptr->abort_all_start) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
			    __func__, (void *)pptr);
			/* cv_wait drops and reacquires the PHY lock. */
			cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
		}
	} else if (pptr->abort_pending) {
		/* No ABORT_ALL in flight; issue one ourselves. */
		r = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);

		if (r) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: ABORT_ALL returned non-zero status (%d) for "
			    "PHY 0x%p", __func__, r, (void *)pptr);
			return (r);
		}
		pptr->abort_pending = 0;
	}

	/* Nothing registered with the chip: nothing to deregister. */
	if (pptr->valid_device_id == 0) {
		return (0);
	}

	if ((pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}
	/* Build the DEREGISTER_DEVICE_HANDLE IOMB. */
	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		mutex_exit(&pwrk->lock);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting for the completion. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_lock_phy(pptr);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		return (ETIMEDOUT);
	}
	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/* Log but continue: the handle is invalidated regardless. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	}

	/* Invalidate the handle and trigger rediscovery of this PHY. */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	PHY_CHANGED(pwp, pptr);
	RESTART_DISCOVERY(pwp);
	pptr->valid_device_id = 0;
	return (0);
}
6149 
6150 /*
6151  * Acknowledge the SAS h/w events that need acknowledgement.
6152  * This is only needed for first level PHYs.
6153  */
6154 void
6155 pmcs_ack_events(pmcs_hw_t *pwp)
6156 {
6157 	uint32_t msg[PMCS_MSG_SIZE], *ptr;
6158 	struct pmcwork *pwrk;
6159 	pmcs_phy_t *pptr;
6160 
6161 	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
6162 		pmcs_lock_phy(pptr);
6163 		if (pptr->hw_event_ack == 0) {
6164 			pmcs_unlock_phy(pptr);
6165 			continue;
6166 		}
6167 		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
6168 		ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6169 
6170 		if ((ptr == NULL) || (pwrk =
6171 		    pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
6172 			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
6173 			pmcs_unlock_phy(pptr);
6174 			SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK);
6175 			break;
6176 		}
6177 
6178 		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
6179 		    PMCIN_SAW_HW_EVENT_ACK));
6180 		msg[1] = LE_32(pwrk->htag);
6181 		msg[2] = LE_32(pptr->hw_event_ack);
6182 
6183 		mutex_exit(&pwrk->lock);
6184 		pwrk->dtype = pptr->dtype;
6185 		pptr->hw_event_ack = 0;
6186 		COPY_MESSAGE(ptr, msg, 3);
6187 		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6188 		pmcs_unlock_phy(pptr);
6189 	}
6190 }
6191 
6192 /*
6193  * Load DMA
6194  */
6195 int
6196 pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg)
6197 {
6198 	ddi_dma_cookie_t *sg;
6199 	pmcs_dmachunk_t *tc;
6200 	pmcs_dmasgl_t *sgl, *prior;
6201 	int seg, tsc;
6202 	uint64_t sgl_addr;
6203 
6204 	/*
6205 	 * If we have no data segments, we're done.
6206 	 */
6207 	if (CMD2PKT(sp)->pkt_numcookies == 0) {
6208 		return (0);
6209 	}
6210 
6211 	/*
6212 	 * Get the S/G list pointer.
6213 	 */
6214 	sg = CMD2PKT(sp)->pkt_cookies;
6215 
6216 	/*
6217 	 * If we only have one dma segment, we can directly address that
6218 	 * data within the Inbound message itself.
6219 	 */
6220 	if (CMD2PKT(sp)->pkt_numcookies == 1) {
6221 		msg[12] = LE_32(DWORD0(sg->dmac_laddress));
6222 		msg[13] = LE_32(DWORD1(sg->dmac_laddress));
6223 		msg[14] = LE_32(sg->dmac_size);
6224 		msg[15] = 0;
6225 		return (0);
6226 	}
6227 
6228 	/*
6229 	 * Otherwise, we'll need one or more external S/G list chunks.
6230 	 * Get the first one and its dma address into the Inbound message.
6231 	 */
6232 	mutex_enter(&pwp->dma_lock);
6233 	tc = pwp->dma_freelist;
6234 	if (tc == NULL) {
6235 		SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
6236 		mutex_exit(&pwp->dma_lock);
6237 		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
6238 		    "%s: out of SG lists", __func__);
6239 		return (-1);
6240 	}
6241 	pwp->dma_freelist = tc->nxt;
6242 	mutex_exit(&pwp->dma_lock);
6243 
6244 	tc->nxt = NULL;
6245 	sp->cmd_clist = tc;
6246 	sgl = tc->chunks;
6247 	(void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
6248 	sgl_addr = tc->addr;
6249 	msg[12] = LE_32(DWORD0(sgl_addr));
6250 	msg[13] = LE_32(DWORD1(sgl_addr));
6251 	msg[14] = 0;
6252 	msg[15] = LE_32(PMCS_DMASGL_EXTENSION);
6253 
6254 	prior = sgl;
6255 	tsc = 0;
6256 
6257 	for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) {
6258 		/*
6259 		 * If the current segment count for this chunk is one less than
6260 		 * the number s/g lists per chunk and we have more than one seg
6261 		 * to go, we need another chunk. Get it, and make sure that the
6262 		 * tail end of the the previous chunk points the new chunk
6263 		 * (if remembering an offset can be called 'pointing to').
6264 		 *
6265 		 * Note that we can store the offset into our command area that
6266 		 * represents the new chunk in the length field of the part
6267 		 * that points the PMC chip at the next chunk- the PMC chip
6268 		 * ignores this field when the EXTENSION bit is set.
6269 		 *
6270 		 * This is required for dma unloads later.
6271 		 */
6272 		if (tsc == (PMCS_SGL_NCHUNKS - 1) &&
6273 		    seg < (CMD2PKT(sp)->pkt_numcookies - 1)) {
6274 			mutex_enter(&pwp->dma_lock);
6275 			tc = pwp->dma_freelist;
6276 			if (tc == NULL) {
6277 				SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
6278 				mutex_exit(&pwp->dma_lock);
6279 				pmcs_dma_unload(pwp, sp);
6280 				pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
6281 				    "%s: out of SG lists", __func__);
6282 				return (-1);
6283 			}
6284 			pwp->dma_freelist = tc->nxt;
6285 			tc->nxt = sp->cmd_clist;
6286 			mutex_exit(&pwp->dma_lock);
6287 
6288 			sp->cmd_clist = tc;
6289 			(void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
6290 			sgl = tc->chunks;
6291 			sgl_addr = tc->addr;
6292 			prior[PMCS_SGL_NCHUNKS-1].sglal =
6293 			    LE_32(DWORD0(sgl_addr));
6294 			prior[PMCS_SGL_NCHUNKS-1].sglah =
6295 			    LE_32(DWORD1(sgl_addr));
6296 			prior[PMCS_SGL_NCHUNKS-1].sglen = 0;
6297 			prior[PMCS_SGL_NCHUNKS-1].flags =
6298 			    LE_32(PMCS_DMASGL_EXTENSION);
6299 			prior = sgl;
6300 			tsc = 0;
6301 		}
6302 		sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress));
6303 		sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress));
6304 		sgl[tsc].sglen = LE_32(sg->dmac_size);
6305 		sgl[tsc++].flags = 0;
6306 		sg++;
6307 	}
6308 	return (0);
6309 }
6310 
6311 /*
6312  * Unload DMA
6313  */
6314 void
6315 pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp)
6316 {
6317 	pmcs_dmachunk_t *cp;
6318 
6319 	mutex_enter(&pwp->dma_lock);
6320 	while ((cp = sp->cmd_clist) != NULL) {
6321 		sp->cmd_clist = cp->nxt;
6322 		cp->nxt = pwp->dma_freelist;
6323 		pwp->dma_freelist = cp;
6324 	}
6325 	mutex_exit(&pwp->dma_lock);
6326 }
6327 
6328 /*
6329  * Take a chunk of consistent memory that has just been allocated and inserted
6330  * into the cip indices and prepare it for DMA chunk usage and add it to the
6331  * freelist.
6332  *
6333  * Called with dma_lock locked (except during attach when it's unnecessary)
6334  */
6335 void
6336 pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp,
6337     pmcs_chunk_t *pchunk, unsigned long lim)
6338 {
6339 	unsigned long off, n;
6340 	pmcs_dmachunk_t *np = dcp;
6341 	pmcs_chunk_t *tmp_chunk;
6342 
6343 	if (pwp->dma_chunklist == NULL) {
6344 		pwp->dma_chunklist = pchunk;
6345 	} else {
6346 		tmp_chunk = pwp->dma_chunklist;
6347 		while (tmp_chunk->next) {
6348 			tmp_chunk = tmp_chunk->next;
6349 		}
6350 		tmp_chunk->next = pchunk;
6351 	}
6352 
6353 	/*
6354 	 * Install offsets into chunk lists.
6355 	 */
6356 	for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) {
6357 		np->chunks = (void *)&pchunk->addrp[off];
6358 		np->addr = pchunk->dma_addr + off;
6359 		np->acc_handle = pchunk->acc_handle;
6360 		np->dma_handle = pchunk->dma_handle;
6361 		if ((off + PMCS_SGL_CHUNKSZ) < lim) {
6362 			np = np->nxt;
6363 		}
6364 	}
6365 	np->nxt = pwp->dma_freelist;
6366 	pwp->dma_freelist = dcp;
6367 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
6368 	    "added %lu DMA chunks ", n);
6369 }
6370 
6371 /*
6372  * Change the value of the interrupt coalescing timer.  This is done currently
6373  * only for I/O completions.  If we're using the "auto clear" feature, it can
6374  * be turned back on when interrupt coalescing is turned off and must be
6375  * turned off when the coalescing timer is on.
6376  * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value.  As long
6377  * as that's true, we don't need to distinguish between them.
6378  */
6379 
6380 void
6381 pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj)
6382 {
6383 	if (adj == DECREASE_TIMER) {
6384 		/* If the timer is already off, nothing to do. */
6385 		if (pwp->io_intr_coal.timer_on == B_FALSE) {
6386 			return;
6387 		}
6388 
6389 		pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN;
6390 
6391 		if (pwp->io_intr_coal.intr_coal_timer == 0) {
6392 			/* Disable the timer */
6393 			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0);
6394 
6395 			if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
6396 				pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
6397 				    pwp->odb_auto_clear);
6398 			}
6399 
6400 			pwp->io_intr_coal.timer_on = B_FALSE;
6401 			pwp->io_intr_coal.max_io_completions = B_FALSE;
6402 			pwp->io_intr_coal.num_intrs = 0;
6403 			pwp->io_intr_coal.int_cleared = B_FALSE;
6404 			pwp->io_intr_coal.num_io_completions = 0;
6405 
6406 			DTRACE_PROBE1(pmcs__intr__coalesce__timer__off,
6407 			    pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
6408 		} else {
6409 			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
6410 			    pwp->io_intr_coal.intr_coal_timer);
6411 		}
6412 	} else {
6413 		/*
6414 		 * If the timer isn't on yet, do the setup for it now.
6415 		 */
6416 		if (pwp->io_intr_coal.timer_on == B_FALSE) {
6417 			/* If auto clear is being used, turn it off. */
6418 			if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
6419 				pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
6420 				    (pwp->odb_auto_clear &
6421 				    ~(1 << PMCS_MSIX_IODONE)));
6422 			}
6423 
6424 			pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL,
6425 			    (1 << PMCS_MSIX_IODONE));
6426 			pwp->io_intr_coal.timer_on = B_TRUE;
6427 			pwp->io_intr_coal.intr_coal_timer =
6428 			    PMCS_COAL_TIMER_GRAN;
6429 
6430 			DTRACE_PROBE1(pmcs__intr__coalesce__timer__on,
6431 			    pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
6432 		} else {
6433 			pwp->io_intr_coal.intr_coal_timer +=
6434 			    PMCS_COAL_TIMER_GRAN;
6435 		}
6436 
6437 		if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) {
6438 			pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER;
6439 		}
6440 
6441 		pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
6442 		    pwp->io_intr_coal.intr_coal_timer);
6443 	}
6444 
6445 	/*
6446 	 * Adjust the interrupt threshold based on the current timer value
6447 	 */
6448 	pwp->io_intr_coal.intr_threshold =
6449 	    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
6450 	    (pwp->io_intr_coal.intr_latency +
6451 	    (pwp->io_intr_coal.intr_coal_timer * 1000)));
6452 }
6453 
6454 /*
6455  * Register Access functions
6456  */
6457 uint32_t
6458 pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum)
6459 {
6460 	uint32_t iqci;
6461 
6462 	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
6463 	    DDI_SUCCESS) {
6464 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6465 		    "%s: ddi_dma_sync failed?", __func__);
6466 	}
6467 
6468 	iqci = LE_32(
6469 	    ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]);
6470 
6471 	return (iqci);
6472 }
6473 
6474 uint32_t
6475 pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum)
6476 {
6477 	uint32_t oqpi;
6478 
6479 	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
6480 	    DDI_SUCCESS) {
6481 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6482 		    "%s: ddi_dma_sync failed?", __func__);
6483 	}
6484 
6485 	oqpi = LE_32(
6486 	    ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]);
6487 
6488 	return (oqpi);
6489 }
6490 
/*
 * Read a GSM register.
 *
 * GSM space is windowed behind the AXIL address-translation register: the
 * bits of "off" above GSM_BASE_MASK select the window (written to
 * PMCS_AXI_TRANS) and the bits within GSM_BASE_MASK index into the mapped
 * region.  The previous window is restored before returning, and
 * axil_lock serializes the whole translate/access/restore sequence.
 */
uint32_t
pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint32_t off)
{
	uint32_t rv, newaxil, oldaxil;

	newaxil = off & ~GSM_BASE_MASK;
	off &= GSM_BASE_MASK;
	mutex_enter(&pwp->axil_lock);
	oldaxil = ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
	drv_usecwait(10);
	/* Verify the window actually switched before touching GSM space */
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register update failed");
	}
	rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]);
	/* Restore the previous window and verify the restore took effect */
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
	drv_usecwait(10);
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register restore failed");
	}
	mutex_exit(&pwp->axil_lock);
	return (rv);
}
6521 
/*
 * Write a GSM register.
 *
 * Same AXIL windowing scheme as pmcs_rd_gsm_reg(): select the window for
 * the upper bits of "off", perform the access with the lower bits, then
 * restore the previous window -- all under axil_lock.
 */
void
pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	uint32_t newaxil, oldaxil;

	newaxil = off & ~GSM_BASE_MASK;
	off &= GSM_BASE_MASK;
	mutex_enter(&pwp->axil_lock);
	oldaxil = ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
	drv_usecwait(10);
	/* Verify the window actually switched before touching GSM space */
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register update failed");
	}
	ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val);
	/* Restore the previous window and verify the restore took effect */
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
	drv_usecwait(10);
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register restore failed");
	}
	mutex_exit(&pwp->axil_lock);
}
6551 
6552 uint32_t
6553 pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off)
6554 {
6555 	switch (off) {
6556 	case PMCS_SPC_RESET:
6557 	case PMCS_SPC_BOOT_STRAP:
6558 	case PMCS_SPC_DEVICE_ID:
6559 	case PMCS_DEVICE_REVISION:
6560 		off = pmcs_rd_gsm_reg(pwp, off);
6561 		break;
6562 	default:
6563 		off = ddi_get32(pwp->top_acc_handle,
6564 		    &pwp->top_regs[off >> 2]);
6565 		break;
6566 	}
6567 	return (off);
6568 }
6569 
6570 void
6571 pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
6572 {
6573 	switch (off) {
6574 	case PMCS_SPC_RESET:
6575 	case PMCS_DEVICE_REVISION:
6576 		pmcs_wr_gsm_reg(pwp, off, val);
6577 		break;
6578 	default:
6579 		ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val);
6580 		break;
6581 	}
6582 }
6583 
/* Read a message unit register. */
uint32_t
pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2]));
}

/* Read from the MPI configuration table at byte offset "off". */
uint32_t
pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2]));
}

/* Read from the general status table (gst offset base). */
uint32_t
pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2]));
}

/* Read from the inbound queue configuration table. */
uint32_t
pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2]));
}

/* Read from the outbound queue configuration table. */
uint32_t
pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2]));
}

/* Read the chip-resident producer index for inbound queue "qnum". */
uint32_t
pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2]));
}

/* Read the chip-resident consumer index for outbound queue "qnum". */
uint32_t
pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2]));
}
6631 
/* Write a message unit register. */
void
pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val);
}

/* Write to the MPI configuration table at byte offset "off". */
void
pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val));
}

/* Write to the general status table (gst offset base). */
void
pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val);
}

/* Write to the inbound queue configuration table. */
void
pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val);
}

/* Write to the outbound queue configuration table. */
void
pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val);
}

/*
 * Write the inbound queue consumer index for "qnum".  The index lives in
 * host memory (pwp->cip), so it must be synced out to the device after
 * being updated.
 */
void
pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
	((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val;
	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: ddi_dma_sync failed?", __func__);
	}
}

/* Write the chip-resident producer index for inbound queue "qnum". */
void
pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val);
}

/* Write the chip-resident consumer index for outbound queue "qnum". */
void
pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val);
}

/*
 * Write the outbound queue producer index for "qnum".  The index lives in
 * host memory (pwp->cip), so it must be synced out to the device after
 * being updated.
 */
void
pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
	((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val;
	if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: ddi_dma_sync failed?", __func__);
	}
}
6701 
6702 /*
6703  * Check the status value of an outbound IOMB and report anything bad
6704  */
6705 
6706 void
6707 pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb)
6708 {
6709 	uint16_t 	opcode;
6710 	int		offset;
6711 
6712 	if (iomb == NULL) {
6713 		return;
6714 	}
6715 
6716 	opcode = LE_32(iomb[0]) & 0xfff;
6717 
6718 	switch (opcode) {
6719 		/*
6720 		 * The following have no status field, so ignore them
6721 		 */
6722 	case PMCOUT_ECHO:
6723 	case PMCOUT_SAS_HW_EVENT:
6724 	case PMCOUT_GET_DEVICE_HANDLE:
6725 	case PMCOUT_SATA_EVENT:
6726 	case PMCOUT_SSP_EVENT:
6727 	case PMCOUT_DEVICE_HANDLE_ARRIVED:
6728 	case PMCOUT_SMP_REQUEST_RECEIVED:
6729 	case PMCOUT_GPIO:
6730 	case PMCOUT_GPIO_EVENT:
6731 	case PMCOUT_GET_TIME_STAMP:
6732 	case PMCOUT_SKIP_ENTRIES:
6733 	case PMCOUT_GET_NVMD_DATA:	/* Actually lower 16 bits of word 3 */
6734 	case PMCOUT_SET_NVMD_DATA:	/* but ignore - we don't use these */
6735 	case PMCOUT_DEVICE_HANDLE_REMOVED:
6736 	case PMCOUT_SSP_REQUEST_RECEIVED:
6737 		return;
6738 
6739 	case PMCOUT_GENERAL_EVENT:
6740 		offset = 1;
6741 		break;
6742 
6743 	case PMCOUT_SSP_COMPLETION:
6744 	case PMCOUT_SMP_COMPLETION:
6745 	case PMCOUT_DEVICE_REGISTRATION:
6746 	case PMCOUT_DEREGISTER_DEVICE_HANDLE:
6747 	case PMCOUT_SATA_COMPLETION:
6748 	case PMCOUT_DEVICE_INFO:
6749 	case PMCOUT_FW_FLASH_UPDATE:
6750 	case PMCOUT_SSP_ABORT:
6751 	case PMCOUT_SATA_ABORT:
6752 	case PMCOUT_SAS_DIAG_MODE_START_END:
6753 	case PMCOUT_SAS_HW_EVENT_ACK_ACK:
6754 	case PMCOUT_SMP_ABORT:
6755 	case PMCOUT_SET_DEVICE_STATE:
6756 	case PMCOUT_GET_DEVICE_STATE:
6757 	case PMCOUT_SET_DEVICE_INFO:
6758 		offset = 2;
6759 		break;
6760 
6761 	case PMCOUT_LOCAL_PHY_CONTROL:
6762 	case PMCOUT_SAS_DIAG_EXECUTE:
6763 	case PMCOUT_PORT_CONTROL:
6764 		offset = 3;
6765 		break;
6766 
6767 	case PMCOUT_GET_INFO:
6768 	case PMCOUT_GET_VPD:
6769 	case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
6770 	case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
6771 	case PMCOUT_SET_VPD:
6772 	case PMCOUT_TWI:
6773 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
6774 		    "Got response for deprecated opcode", iomb);
6775 		return;
6776 
6777 	default:
6778 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
6779 		    "Got response for unknown opcode", iomb);
6780 		return;
6781 	}
6782 
6783 	if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) {
6784 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
6785 		    "bad status on TAG_TYPE_NONE command", iomb);
6786 	}
6787 }
6788 
6789 /*
6790  * Called with statlock held
6791  */
6792 void
6793 pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
6794 {
6795 	_NOTE(ARGUNUSED(pwp));
6796 
6797 	ASSERT(mutex_owned(&xp->statlock));
6798 
6799 	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: Device 0x%p is gone.",
6800 	    __func__, (void *)xp);
6801 
6802 	/*
6803 	 * Clear the dip now.  This keeps pmcs_remove_device from attempting
6804 	 * to call us on the same device while we're still flushing queues.
6805 	 * The only side effect is we can no longer update SM-HBA properties,
6806 	 * but this device is going away anyway, so no matter.
6807 	 */
6808 	xp->dip = NULL;
6809 	xp->smpd = NULL;
6810 	xp->special_running = 0;
6811 	xp->recovering = 0;
6812 	xp->recover_wait = 0;
6813 	xp->draining = 0;
6814 	xp->new = 0;
6815 	xp->assigned = 0;
6816 	xp->dev_state = 0;
6817 	xp->tagmap = 0;
6818 	xp->dev_gone = 1;
6819 	xp->event_recovery = 0;
6820 	xp->dtype = NOTHING;
6821 	xp->wq_recovery_tail = NULL;
6822 	/* Don't clear xp->phy */
6823 	/* Don't clear xp->actv_cnt */
6824 	/* Don't clear xp->actv_pkts */
6825 
6826 	/*
6827 	 * Flush all target queues
6828 	 */
6829 	pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
6830 }
6831 
/*
 * Log the function-result field of an SMP response frame at DEBUG level
 * and return it unchanged to the caller (0 == SMP_RES_FUNCTION_ACCEPTED
 * by SMP convention -- the caller decides what to do with non-zero).
 */
static int
pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
{
	int result = srf->srf_result;

	switch (result) {
	case SMP_RES_UNKNOWN_FUNCTION:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Unknown SMP Function(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_FUNCTION_FAILED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: SMP Function Failed(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Invalid Request Frame Length(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: Incomplete Descriptor List(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_PHY_DOES_NOT_EXIST:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: PHY does not exist(0x%x)",
		    __func__, result);
		break;
	case SMP_RES_PHY_VACANT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: PHY Vacant(0x%x)",
		    __func__, result);
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: SMP DISCOVER Response "
		    "Function Result: (0x%x)",
		    __func__, result);
		break;
	}

	return (result);
}
6884 
6885 /*
6886  * Do all the repetitive stuff necessary to setup for DMA
6887  *
6888  * pwp: Used for dip
6889  * dma_attr: ddi_dma_attr_t to use for the mapping
6890  * acch: ddi_acc_handle_t to use for the mapping
6891  * dmah: ddi_dma_handle_t to use
6892  * length: Amount of memory for mapping
6893  * kvap: Pointer filled in with kernel virtual address on successful return
6894  * dma_addr: Pointer filled in with DMA address on successful return
6895  */
6896 boolean_t
6897 pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
6898     ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
6899 {
6900 	dev_info_t		*dip = pwp->dip;
6901 	ddi_dma_cookie_t	cookie;
6902 	size_t			real_length;
6903 	uint_t			ddma_flag = DDI_DMA_CONSISTENT;
6904 	uint_t			ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
6905 	uint_t			cookie_cnt;
6906 	ddi_device_acc_attr_t	mattr = {
6907 		DDI_DEVICE_ATTR_V0,
6908 		DDI_NEVERSWAP_ACC,
6909 		DDI_STRICTORDER_ACC,
6910 		DDI_DEFAULT_ACC
6911 	};
6912 
6913 	*acch = NULL;
6914 	*dmah = NULL;
6915 
6916 	if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
6917 	    DDI_SUCCESS) {
6918 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6919 		    "Failed to allocate DMA handle");
6920 		return (B_FALSE);
6921 	}
6922 
6923 	if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
6924 	    NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
6925 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6926 		    "Failed to allocate DMA mem");
6927 		ddi_dma_free_handle(dmah);
6928 		*dmah = NULL;
6929 		return (B_FALSE);
6930 	}
6931 
6932 	if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
6933 	    ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
6934 	    != DDI_DMA_MAPPED) {
6935 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
6936 		ddi_dma_free_handle(dmah);
6937 		ddi_dma_mem_free(acch);
6938 		*dmah = NULL;
6939 		*acch = NULL;
6940 		return (B_FALSE);
6941 	}
6942 
6943 	if (cookie_cnt != 1) {
6944 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Multiple cookies");
6945 		if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
6946 			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Condition "
6947 			    "failed at %s():%d", __func__, __LINE__);
6948 		}
6949 		ddi_dma_free_handle(dmah);
6950 		ddi_dma_mem_free(acch);
6951 		*dmah = NULL;
6952 		*acch = NULL;
6953 		return (B_FALSE);
6954 	}
6955 
6956 	*dma_addr = cookie.dmac_laddress;
6957 
6958 	return (B_TRUE);
6959 }
6960 
6961 /*
6962  * Flush requested queues for a particular target.  Called with statlock held
6963  */
6964 void
6965 pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
6966 {
6967 	pmcs_cmd_t	*sp, *sp_next;
6968 	pmcwork_t	*pwrk;
6969 
6970 	ASSERT(pwp != NULL);
6971 	ASSERT(tgt != NULL);
6972 
6973 	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, tgt,
6974 	    "%s: Flushing queues (%d) for target 0x%p", __func__,
6975 	    queues, (void *)tgt);
6976 
6977 	/*
6978 	 * Commands on the wait queue (or the special queue below) don't have
6979 	 * work structures associated with them.
6980 	 */
6981 	if (queues & PMCS_TGT_WAIT_QUEUE) {
6982 		mutex_enter(&tgt->wqlock);
6983 		while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
6984 			STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
6985 			pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, tgt,
6986 			    "%s: Removing cmd 0x%p from wq for target 0x%p",
6987 			    __func__, (void *)sp, (void *)tgt);
6988 			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
6989 			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
6990 			mutex_exit(&tgt->wqlock);
6991 			pmcs_dma_unload(pwp, sp);
6992 			mutex_enter(&pwp->cq_lock);
6993 			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
6994 			mutex_exit(&pwp->cq_lock);
6995 			mutex_enter(&tgt->wqlock);
6996 		}
6997 		mutex_exit(&tgt->wqlock);
6998 	}
6999 
7000 	/*
7001 	 * Commands on the active queue will have work structures associated
7002 	 * with them.
7003 	 */
7004 	if (queues & PMCS_TGT_ACTIVE_QUEUE) {
7005 		mutex_enter(&tgt->aqlock);
7006 		sp = STAILQ_FIRST(&tgt->aq);
7007 		while (sp) {
7008 			sp_next = STAILQ_NEXT(sp, cmd_next);
7009 			pwrk = pmcs_tag2wp(pwp, sp->cmd_tag);
7010 
7011 			/*
7012 			 * If we don't find a work structure, it's because
7013 			 * the command is already complete.  If so, move on
7014 			 * to the next one.
7015 			 */
7016 			if (pwrk == NULL) {
7017 				pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
7018 				    "%s: Not removing cmd 0x%p (htag 0x%x) "
7019 				    "from aq", __func__, (void *)sp,
7020 				    sp->cmd_tag);
7021 				sp = sp_next;
7022 				continue;
7023 			}
7024 
7025 			STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
7026 			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
7027 			    "%s: Removing cmd 0x%p (htag 0x%x) from aq for "
7028 			    "target 0x%p", __func__, (void *)sp, sp->cmd_tag,
7029 			    (void *)tgt);
7030 			mutex_exit(&tgt->aqlock);
7031 			mutex_exit(&tgt->statlock);
7032 			/*
7033 			 * Mark the work structure as dead and complete it
7034 			 */
7035 			pwrk->dead = 1;
7036 			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
7037 			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
7038 			pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
7039 			pmcs_dma_unload(pwp, sp);
7040 			mutex_enter(&pwp->cq_lock);
7041 			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
7042 			mutex_exit(&pwp->cq_lock);
7043 			mutex_enter(&tgt->aqlock);
7044 			mutex_enter(&tgt->statlock);
7045 			sp = sp_next;
7046 		}
7047 		mutex_exit(&tgt->aqlock);
7048 	}
7049 
7050 	if (queues & PMCS_TGT_SPECIAL_QUEUE) {
7051 		while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
7052 			STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
7053 			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
7054 			    "%s: Removing cmd 0x%p from sq for target 0x%p",
7055 			    __func__, (void *)sp, (void *)tgt);
7056 			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
7057 			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
7058 			pmcs_dma_unload(pwp, sp);
7059 			mutex_enter(&pwp->cq_lock);
7060 			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
7061 			mutex_exit(&pwp->cq_lock);
7062 		}
7063 	}
7064 }
7065 
/*
 * Complete a work structure according to the tag type encoded in its htag.
 * Called with pwrk->lock held; each arm disposes of the lock/structure:
 *  - CBACK: invokes the stored callback with pwrk (the callback presumably
 *    releases pwrk itself -- depends on the individual callback).
 *  - WAIT: copies any returned IOMB into pwrk->arg, wakes the waiter and
 *    drops the lock; the waiter is responsible for freeing pwrk.
 *  - NONE: nobody is waiting; optionally sanity-check the IOMB status
 *    (DEBUG builds only), then release pwrk via pmcs_pwork().
 *  - anything else: logged; the structure is intentionally leaked since
 *    its ownership is unknown.
 */
void
pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
    size_t amt)
{
	switch (PMCS_TAG_TYPE(pwrk->htag)) {
	case PMCS_TAG_TYPE_CBACK:
	{
		pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr;
		(*callback)(pwp, pwrk, iomb);
		break;
	}
	case PMCS_TAG_TYPE_WAIT:
		if (pwrk->arg && iomb && amt) {
			(void) memcpy(pwrk->arg, iomb, amt);
		}
		cv_signal(&pwrk->sleep_cv);
		mutex_exit(&pwrk->lock);
		break;
	case PMCS_TAG_TYPE_NONE:
#ifdef DEBUG
		pmcs_check_iomb_status(pwp, iomb);
#endif
		pmcs_pwork(pwp, pwrk);
		break;
	default:
		/*
		 * We will leak a structure here if we don't know
		 * what happened
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Unknown PMCS_TAG_TYPE (%x)",
		    __func__, PMCS_TAG_TYPE(pwrk->htag));
		break;
	}
}
7101 
7102 /*
7103  * Determine if iport still has targets. During detach(9E), if SCSA is
7104  * successfull in its guarantee of tran_tgt_free(9E) before detach(9E),
7105  * this should always return B_FALSE.
7106  */
7107 boolean_t
7108 pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport)
7109 {
7110 	pmcs_xscsi_t *xp;
7111 	int i;
7112 
7113 	mutex_enter(&pwp->lock);
7114 
7115 	if (!pwp->targets || !pwp->max_dev) {
7116 		mutex_exit(&pwp->lock);
7117 		return (B_FALSE);
7118 	}
7119 
7120 	for (i = 0; i < pwp->max_dev; i++) {
7121 		xp = pwp->targets[i];
7122 		if ((xp == NULL) || (xp->phy == NULL) ||
7123 		    (xp->phy->iport != iport)) {
7124 			continue;
7125 		}
7126 
7127 		mutex_exit(&pwp->lock);
7128 		return (B_TRUE);
7129 	}
7130 
7131 	mutex_exit(&pwp->lock);
7132 	return (B_FALSE);
7133 }
7134 
7135 /*
7136  * Called with softstate lock held
7137  */
void
pmcs_destroy_target(pmcs_xscsi_t *target)
{
	pmcs_hw_t *pwp = target->pwp;
	pmcs_iport_t *iport;

	ASSERT(pwp);
	ASSERT(mutex_owned(&pwp->lock));

	/* A missing unit address is unexpected; note it and carry on */
	if (!target->ua) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: target %p iport address is null",
		    __func__, (void *)target);
	}

	/*
	 * NOTE(review): if no iport is found we return without destroying
	 * the target's mutexes/CVs or freeing its soft state -- confirm
	 * that this early-return path is intentional.
	 */
	iport = pmcs_get_iport_by_ua(pwp, target->ua);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: no iport associated with tgt(0x%p)",
		    __func__, (void *)target);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target,
	    "%s: free target %p", __func__, (void *)target);
	if (target->ua) {
		strfree(target->ua);
	}

	/* Tear down the target's synchronization objects and soft state */
	mutex_destroy(&target->wqlock);
	mutex_destroy(&target->aqlock);
	mutex_destroy(&target->statlock);
	cv_destroy(&target->reset_cv);
	cv_destroy(&target->abort_cv);
	ddi_soft_state_bystr_fini(&target->lun_sstate);
	ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
	/* Drop the reference obtained via pmcs_get_iport_by_ua() above */
	pmcs_rele_iport(iport);
}
7176 
7177 /*
7178  * pmcs_lock_phy_impl
7179  *
7180  * This function is what does the actual work for pmcs_lock_phy.  It will
7181  * lock all PHYs from phyp down in a top-down fashion.
7182  *
7183  * Locking notes:
7184  * 1. level starts from 0 for the PHY ("parent") that's passed in.  It is
7185  * not a reflection of the actual level of the PHY in the SAS topology.
7186  * 2. If parent is an expander, then parent is locked along with all its
 * descendants.
7188  * 3. Expander subsidiary PHYs at level 0 are not locked.  It is the
7189  * responsibility of the caller to individually lock expander subsidiary PHYs
7190  * at level 0 if necessary.
7191  * 4. Siblings at level 0 are not traversed due to the possibility that we're
7192  * locking a PHY on the dead list.  The siblings could be pointing to invalid
7193  * PHYs.  We don't lock siblings at level 0 anyway.
7194  */
static void
pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *tphyp;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Start walking the PHYs.
	 */
	tphyp = phyp;
	while (tphyp) {
		/*
		 * If we're at the top level, only lock ourselves.  For anything
		 * at level > 0, traverse children while locking everything.
		 */
		if ((level > 0) || (tphyp == phyp)) {
			pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, tphyp,
			    NULL, "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)tphyp, (void *)tphyp->parent,
			    tphyp->path, level);
			mutex_enter(&tphyp->phy_lock);

			/* Recurse to lock this PHY's entire subtree */
			if (tphyp->children) {
				pmcs_lock_phy_impl(tphyp->children, level + 1);
			}
		}

		/* Per the locking notes above, siblings at level 0 are not
		 * traversed (they may be invalid on the dead list). */
		if (level == 0) {
			return;
		}

		tphyp = tphyp->sibling;
	}
}
7231 
7232 /*
7233  * pmcs_lock_phy
7234  *
 * This function is responsible for locking a PHY and all its descendants
7236  */
7237 void
7238 pmcs_lock_phy(pmcs_phy_t *phyp)
7239 {
7240 #ifdef DEBUG
7241 	char *callername = NULL;
7242 	ulong_t off;
7243 
7244 	ASSERT(phyp != NULL);
7245 
7246 	callername = modgetsymname((uintptr_t)caller(), &off);
7247 
7248 	if (callername == NULL) {
7249 		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7250 		    "%s: PHY 0x%p path %s caller: unknown", __func__,
7251 		    (void *)phyp, phyp->path);
7252 	} else {
7253 		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7254 		    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
7255 		    (void *)phyp, phyp->path, callername, off);
7256 	}
7257 #else
7258 	pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7259 	    "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
7260 #endif
7261 	pmcs_lock_phy_impl(phyp, 0);
7262 }
7263 
7264 /*
7265  * pmcs_unlock_phy_impl
7266  *
7267  * Unlock all PHYs from phyp down in a bottom-up fashion.
7268  */
static void
pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *phy_next;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Recurse down to the bottom PHYs
	 */
	if (level == 0) {
		/* Top level: descend into children only, never siblings */
		if (phyp->children) {
			pmcs_unlock_phy_impl(phyp->children, level + 1);
		}
	} else {
		/* Below the top: descend into every sibling's subtree */
		phy_next = phyp;
		while (phy_next) {
			if (phy_next->children) {
				pmcs_unlock_phy_impl(phy_next->children,
				    level + 1);
			}
			phy_next = phy_next->sibling;
		}
	}

	/*
	 * Iterate through PHYs unlocking all at level > 0 as well the top PHY
	 */
	phy_next = phyp;
	while (phy_next) {
		if ((level > 0) || (phy_next == phyp)) {
			pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
			    phy_next, NULL,
			    "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)phy_next,
			    (void *)phy_next->parent, phy_next->path, level);
			mutex_exit(&phy_next->phy_lock);
		}

		/* Siblings of the top-level PHY are not touched */
		if (level == 0) {
			return;
		}

		phy_next = phy_next->sibling;
	}
}
7316 
7317 /*
7318  * pmcs_unlock_phy
7319  *
 * Unlock a PHY and all its descendants
7321  */
7322 void
7323 pmcs_unlock_phy(pmcs_phy_t *phyp)
7324 {
7325 #ifdef DEBUG
7326 	char *callername = NULL;
7327 	ulong_t off;
7328 
7329 	ASSERT(phyp != NULL);
7330 
7331 	callername = modgetsymname((uintptr_t)caller(), &off);
7332 
7333 	if (callername == NULL) {
7334 		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7335 		    "%s: PHY 0x%p path %s caller: unknown", __func__,
7336 		    (void *)phyp, phyp->path);
7337 	} else {
7338 		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7339 		    "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
7340 		    (void *)phyp, phyp->path, callername, off);
7341 	}
7342 #else
7343 	pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7344 	    "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
7345 #endif
7346 	pmcs_unlock_phy_impl(phyp, 0);
7347 }
7348 
7349 /*
7350  * pmcs_get_root_phy
7351  *
7352  * For a given phy pointer return its root phy.
7353  * This function must only be called during discovery in order to ensure that
7354  * the chain of PHYs from phyp up to the root PHY doesn't change.
7355  */
7356 pmcs_phy_t *
7357 pmcs_get_root_phy(pmcs_phy_t *phyp)
7358 {
7359 	ASSERT(phyp);
7360 
7361 	while (phyp) {
7362 		if (IS_ROOT_PHY(phyp)) {
7363 			break;
7364 		}
7365 		phyp = phyp->parent;
7366 	}
7367 
7368 	return (phyp);
7369 }
7370 
7371 /*
7372  * pmcs_free_dma_chunklist
7373  *
7374  * Free DMA S/G chunk list
7375  */
7376 void
7377 pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
7378 {
7379 	pmcs_chunk_t	*pchunk;
7380 
7381 	while (pwp->dma_chunklist) {
7382 		pchunk = pwp->dma_chunklist;
7383 		pwp->dma_chunklist = pwp->dma_chunklist->next;
7384 		if (pchunk->dma_handle) {
7385 			if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
7386 			    DDI_SUCCESS) {
7387 				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
7388 				    "Condition failed at %s():%d",
7389 				    __func__, __LINE__);
7390 			}
7391 			ddi_dma_free_handle(&pchunk->dma_handle);
7392 			ddi_dma_mem_free(&pchunk->acc_handle);
7393 		}
7394 		kmem_free(pchunk, sizeof (pmcs_chunk_t));
7395 	}
7396 }
7397 
/*
 * kmem cache constructor for PHY structures: initialize the embedded
 * mutex (at the HBA's interrupt priority) and condition variable.
 */
/*ARGSUSED2*/
int
pmcs_phy_constructor(void *buf, void *arg, int kmflags)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
	pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

	mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
	return (0);
}
7410 
/*
 * kmem cache destructor for PHY structures: tear down the CV and mutex
 * set up by pmcs_phy_constructor().
 */
/*ARGSUSED1*/
void
pmcs_phy_destructor(void *buf, void *arg)
{
	pmcs_phy_t *phyp = (pmcs_phy_t *)buf;

	cv_destroy(&phyp->abort_all_cv);
	mutex_destroy(&phyp->phy_lock);
}
7420 
7421 /*
7422  * Free all PHYs from the kmem_cache starting at phyp as well as everything
7423  * on the dead_phys list.
7424  *
7425  * NOTE: This function does not free root PHYs as they are not allocated
7426  * from the kmem_cache.
7427  *
7428  * No PHY locks are acquired as this should only be called during DDI_DETACH
7429  * or soft reset (while pmcs interrupts are disabled).
7430  */
7431 void
7432 pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
7433 {
7434 	pmcs_phy_t *tphyp, *nphyp;
7435 
7436 	if (phyp == NULL) {
7437 		return;
7438 	}
7439 
7440 	tphyp = phyp;
7441 	while (tphyp) {
7442 		nphyp = tphyp->sibling;
7443 
7444 		if (tphyp->children) {
7445 			pmcs_free_all_phys(pwp, tphyp->children);
7446 			tphyp->children = NULL;
7447 		}
7448 		if (!IS_ROOT_PHY(tphyp)) {
7449 			kmem_cache_free(pwp->phy_cache, tphyp);
7450 		}
7451 
7452 		tphyp = nphyp;
7453 	}
7454 
7455 	tphyp = pwp->dead_phys;
7456 	while (tphyp) {
7457 		nphyp = tphyp->sibling;
7458 		kmem_cache_free(pwp->phy_cache, tphyp);
7459 		tphyp = nphyp;
7460 	}
7461 	pwp->dead_phys = NULL;
7462 }
7463 
7464 /*
7465  * Free a list of PHYs linked together by the sibling pointer back to the
7466  * kmem cache from whence they came.  This function does not recurse, so the
7467  * caller must ensure there are no children.
7468  */
7469 void
7470 pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
7471 {
7472 	pmcs_phy_t *next_phy;
7473 
7474 	while (phyp) {
7475 		next_phy = phyp->sibling;
7476 		ASSERT(!mutex_owned(&phyp->phy_lock));
7477 		kmem_cache_free(pwp->phy_cache, phyp);
7478 		phyp = next_phy;
7479 	}
7480 }
7481 
7482 /*
7483  * Make a copy of an existing PHY structure.  This is used primarily in
7484  * discovery to compare the contents of an existing PHY with what gets
7485  * reported back by an expander.
7486  *
7487  * This function must not be called from any context where sleeping is
7488  * not possible.
7489  *
7490  * The new PHY is returned unlocked.
7491  */
7492 static pmcs_phy_t *
7493 pmcs_clone_phy(pmcs_phy_t *orig_phy)
7494 {
7495 	pmcs_phy_t *local;
7496 
7497 	local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);
7498 
7499 	/*
7500 	 * Go ahead and just copy everything...
7501 	 */
7502 	*local = *orig_phy;
7503 
7504 	/*
7505 	 * But the following must be set appropriately for this copy
7506 	 */
7507 	local->sibling = NULL;
7508 	local->children = NULL;
7509 	mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
7510 	    DDI_INTR_PRI(orig_phy->pwp->intr_pri));
7511 
7512 	return (local);
7513 }
7514 
7515 int
7516 pmcs_check_acc_handle(ddi_acc_handle_t handle)
7517 {
7518 	ddi_fm_error_t de;
7519 
7520 	if (handle == NULL) {
7521 		return (DDI_FAILURE);
7522 	}
7523 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
7524 	return (de.fme_status);
7525 }
7526 
7527 int
7528 pmcs_check_dma_handle(ddi_dma_handle_t handle)
7529 {
7530 	ddi_fm_error_t de;
7531 
7532 	if (handle == NULL) {
7533 		return (DDI_FAILURE);
7534 	}
7535 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
7536 	return (de.fme_status);
7537 }
7538 
7539 
/*
 * Post a device ereport with class "<DDI_FM_DEVICE>.<detail>" if this
 * instance's FMA capabilities include ereport support.
 */
void
pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	/* Build the full ereport class string */
	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
		ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
7553 
7554 int
7555 pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
7556 {
7557 	pmcs_chunk_t *pchunk;
7558 	int i;
7559 
7560 	/* check all acc & dma handles allocated in attach */
7561 	if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
7562 	    (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
7563 	    (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
7564 	    (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
7565 	    (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
7566 		goto check_failed;
7567 	}
7568 
7569 	for (i = 0; i < PMCS_NIQ; i++) {
7570 		if ((pmcs_check_dma_handle(
7571 		    pwp->iqp_handles[i]) != DDI_SUCCESS) ||
7572 		    (pmcs_check_acc_handle(
7573 		    pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
7574 			goto check_failed;
7575 		}
7576 	}
7577 
7578 	for (i = 0; i < PMCS_NOQ; i++) {
7579 		if ((pmcs_check_dma_handle(
7580 		    pwp->oqp_handles[i]) != DDI_SUCCESS) ||
7581 		    (pmcs_check_acc_handle(
7582 		    pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
7583 			goto check_failed;
7584 		}
7585 	}
7586 
7587 	if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
7588 	    (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
7589 		goto check_failed;
7590 	}
7591 
7592 	if (pwp->fwlog &&
7593 	    ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
7594 	    (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
7595 		goto check_failed;
7596 	}
7597 
7598 	if (pwp->regdump_hndl && pwp->regdump_acchdl &&
7599 	    ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
7600 	    (pmcs_check_acc_handle(pwp->regdump_acchdl)
7601 	    != DDI_SUCCESS))) {
7602 		goto check_failed;
7603 	}
7604 
7605 
7606 	pchunk = pwp->dma_chunklist;
7607 	while (pchunk) {
7608 		if ((pmcs_check_acc_handle(pchunk->acc_handle)
7609 		    != DDI_SUCCESS) ||
7610 		    (pmcs_check_dma_handle(pchunk->dma_handle)
7611 		    != DDI_SUCCESS)) {
7612 			goto check_failed;
7613 		}
7614 		pchunk = pchunk->next;
7615 	}
7616 
7617 	return (0);
7618 
7619 check_failed:
7620 
7621 	return (1);
7622 }
7623 
7624 /*
7625  * pmcs_handle_dead_phys
7626  *
7627  * If the PHY has no outstanding work associated with it, remove it from
7628  * the dead PHY list and free it.
7629  *
7630  * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
7631  * This keeps routines that need to submit work to the chip from having to
7632  * hold PHY locks to ensure that PHYs don't disappear while they do their work.
7633  */
void
pmcs_handle_dead_phys(pmcs_hw_t *pwp)
{
	pmcs_phy_t *phyp, *nphyp, *pphyp;

	mutex_enter(&pwp->lock);
	mutex_enter(&pwp->config_lock);

	/* Bail if discovery or deadstate recovery is in progress */
	if (pwp->configuring | pwp->ds_err_recovering) {
		mutex_exit(&pwp->config_lock);
		mutex_exit(&pwp->lock);
		return;
	}

	/*
	 * Check every PHY in the dead PHY list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	phyp = pwp->dead_phys;
	pphyp = NULL;	/* Set previous PHY to NULL */

	while (phyp != NULL) {
		pmcs_lock_phy(phyp);
		ASSERT(phyp->dead);

		/* Save the next pointer before we potentially free phyp */
		nphyp = phyp->dead_next;

		/*
		 * Check for outstanding work
		 */
		if (phyp->ref_count > 0) {
			pmcs_unlock_phy(phyp);
			pphyp = phyp;	/* This PHY becomes "previous" */
		} else if (phyp->target) {
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, phyp->target,
			    "%s: Not freeing PHY 0x%p: target 0x%p is not free",
			    __func__, (void *)phyp, (void *)phyp->target);
			pphyp = phyp;
		} else {
			/*
			 * No outstanding work or target references. Remove it
			 * from the list and free it
			 */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
			    "%s: Freeing inactive dead PHY 0x%p @ %s "
			    "target = 0x%p", __func__, (void *)phyp,
			    phyp->path, (void *)phyp->target);
			/*
			 * If pphyp is NULL, then phyp was the head of the list,
			 * so just reset the head to nphyp. Otherwise, the
			 * previous PHY will now point to nphyp (the next PHY)
			 */
			if (pphyp == NULL) {
				pwp->dead_phys = nphyp;
			} else {
				pphyp->dead_next = nphyp;
			}
			/*
			 * If the target still points to this PHY, remove
			 * that linkage now.
			 */
			if (phyp->target) {
				mutex_enter(&phyp->target->statlock);
				if (phyp->target->phy == phyp) {
					phyp->target->phy = NULL;
				}
				mutex_exit(&phyp->target->statlock);
			}
			pmcs_unlock_phy(phyp);
			kmem_cache_free(pwp->phy_cache, phyp);
		}

		phyp = nphyp;
	}

	mutex_exit(&pwp->dead_phylist_lock);
	mutex_exit(&pwp->config_lock);
	mutex_exit(&pwp->lock);
}
7714 
/*
 * Atomically take a reference on a PHY.
 */
void
pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
{
	atomic_inc_32(&phyp->ref_count);
}
7720 
/*
 * Atomically drop a reference on a PHY.
 */
void
pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
{
	/* Underflow here would indicate a refcount accounting bug */
	ASSERT(phyp->ref_count != 0);
	atomic_dec_32(&phyp->ref_count);
}
7727 
7728 /*
7729  * pmcs_reap_dead_phy
7730  *
7731  * This function is called from pmcs_new_tport when we have a PHY
7732  * without a target pointer.  It's possible in that case that this PHY
7733  * may have a "brother" on the dead_phys list.  That is, it may be the same as
7734  * this one but with a different root PHY number (e.g. pp05 vs. pp04).  If
7735  * that's the case, update the dead PHY and this new PHY.  If that's not the
7736  * case, we should get a tran_tgt_init on this after it's reported to SCSA.
7737  *
7738  * Called with PHY locked.
7739  */
static void
pmcs_reap_dead_phy(pmcs_phy_t *phyp)
{
	pmcs_hw_t *pwp = phyp->pwp;
	pmcs_phy_t *ctmp;
	pmcs_iport_t *iport_cmp;

	ASSERT(mutex_owned(&phyp->phy_lock));

	/*
	 * Check the dead PHYs list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	ctmp = pwp->dead_phys;
	while (ctmp) {
		/*
		 * If the iport is NULL, compare against last_iport.
		 */
		if (ctmp->iport) {
			iport_cmp = ctmp->iport;
		} else {
			iport_cmp = ctmp->last_iport;
		}

		/* Skip PHYs on other iports or with different SAS addresses */
		if ((iport_cmp != phyp->iport) ||
		    (memcmp((void *)&ctmp->sas_address[0],
		    (void *)&phyp->sas_address[0], 8))) {
			ctmp = ctmp->dead_next;
			continue;
		}

		/*
		 * Same SAS address on same iport.  Now check to see if
		 * the PHY path is the same with the possible exception
		 * of the root PHY number.
		 * The "5" is the string length of "pp00."
		 */
		if ((strnlen(phyp->path, 5) >= 5) &&
		    (strnlen(ctmp->path, 5) >= 5)) {
			if (memcmp((void *)&phyp->path[5],
			    (void *)&ctmp->path[5],
			    strnlen(phyp->path, 32) - 5) == 0) {
				break;
			}
		}

		ctmp = ctmp->dead_next;
	}
	mutex_exit(&pwp->dead_phylist_lock);

	/*
	 * Found a match.  Remove the target linkage and drop the
	 * ref count on the old PHY.  Then, increment the ref count
	 * on the new PHY to compensate.
	 */
	if (ctmp) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: Found match in dead PHY list (0x%p) for new PHY %s",
		    __func__, (void *)ctmp, phyp->path);
		/*
		 * If there is a pointer to the target in the dead PHY, move
		 * all reference counts to the new PHY.
		 */
		if (ctmp->target) {
			mutex_enter(&ctmp->target->statlock);
			phyp->target = ctmp->target;

			/* Transfer outstanding references one at a time */
			while (ctmp->ref_count != 0) {
				pmcs_inc_phy_ref_count(phyp);
				pmcs_dec_phy_ref_count(ctmp);
			}
			/*
			 * Update the target's linkage as well
			 */
			phyp->target->phy = phyp;
			phyp->target->dtype = phyp->dtype;
			ctmp->target = NULL;
			/* Same mutex: phyp->target now equals ctmp->target */
			mutex_exit(&phyp->target->statlock);
		}
	}
}
7821 
7822 /*
7823  * Called with iport lock held
7824  */
void
pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
{
	ASSERT(mutex_owned(&iport->lock));
	ASSERT(phyp);
	ASSERT(!list_link_active(&phyp->list_node));

	/* Add the PHY and refresh the iport's num-phys property */
	iport->nphy++;
	list_insert_tail(&iport->phys, phyp);
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);

	/* Each PHY on the list holds a reference on the iport */
	mutex_enter(&iport->refcnt_lock);
	iport->refcnt++;
	mutex_exit(&iport->refcnt_lock);
}
7839 
7840 /*
7841  * Called with the iport lock held
7842  */
void
pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
{
	pmcs_phy_t *pptr, *next_pptr;

	ASSERT(mutex_owned(&iport->lock));

	/*
	 * If phyp is NULL, remove all PHYs from the iport
	 */
	if (phyp == NULL) {
		for (pptr = list_head(&iport->phys); pptr != NULL;
		    pptr = next_pptr) {
			next_pptr = list_next(&iport->phys, pptr);
			/* Break the PHY's linkage and PM properties first */
			mutex_enter(&pptr->phy_lock);
			pptr->iport = NULL;
			pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
			    pptr->tgt_port_pm_tmp, B_FALSE);
			mutex_exit(&pptr->phy_lock);
			/* Drop the reference the PHY held on the iport */
			pmcs_rele_iport(iport);
			list_remove(&iport->phys, pptr);
			pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32,
			    PMCS_NUM_PHYS, &iport->nphy);
		}
		iport->nphy = 0;
		return;
	}

	/* Single-PHY removal */
	ASSERT(phyp);
	ASSERT(iport->nphy > 0);
	ASSERT(list_link_active(&phyp->list_node));
	iport->nphy--;
	list_remove(&iport->phys, phyp);
	pmcs_update_phy_pm_props(phyp, phyp->att_port_pm_tmp,
	    phyp->tgt_port_pm_tmp, B_FALSE);
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);
	pmcs_rele_iport(iport);
}
7882 
7883 /*
7884  * This function checks to see if the target pointed to by phyp is still
7885  * correct.  This is done by comparing the target's unit address with the
7886  * SAS address in phyp.
7887  *
7888  * Called with PHY locked and target statlock held
7889  */
7890 static boolean_t
7891 pmcs_phy_target_match(pmcs_phy_t *phyp)
7892 {
7893 	uint64_t wwn;
7894 	char unit_address[PMCS_MAX_UA_SIZE];
7895 	boolean_t rval = B_FALSE;
7896 
7897 	ASSERT(phyp);
7898 	ASSERT(phyp->target);
7899 	ASSERT(mutex_owned(&phyp->phy_lock));
7900 	ASSERT(mutex_owned(&phyp->target->statlock));
7901 
7902 	wwn = pmcs_barray2wwn(phyp->sas_address);
7903 	(void) scsi_wwn_to_wwnstr(wwn, 1, unit_address);
7904 
7905 	if (memcmp((void *)unit_address, (void *)phyp->target->unit_address,
7906 	    strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) {
7907 		rval = B_TRUE;
7908 	}
7909 
7910 	return (rval);
7911 }
7912 /*
7913  * Commands used to serialize SMP requests.
7914  *
7915  * The SPC only allows 2 SMP commands per SMP target: 1 cmd pending and 1 cmd
7916  * queued for the same SMP target. If a third SMP cmd is sent to the SPC for an
7917  * SMP target that already has a SMP cmd pending and one queued, then the
7918  * SPC responds with the ERROR_INTERNAL_SMP_RESOURCE response.
7919  *
7920  * Additionally, the SPC has an 8 entry deep cmd queue and the number of SMP
7921  * cmds that can be queued is controlled by the PORT_CONTROL IOMB. The
7922  * SPC default is 1 SMP command/port (iport).  These 2 queued SMP cmds would
7923  * have to be for different SMP targets.  The INTERNAL_SMP_RESOURCE error will
7924  * also be returned if a 2nd SMP cmd is sent to the controller when there is
7925  * already 1 SMP cmd queued for that port or if a 3rd SMP cmd is sent to the
7926  * queue if there are already 2 queued SMP cmds.
7927  */
void
pmcs_smp_acquire(pmcs_iport_t *iport)
{
	if (iport == NULL) {
		return;
	}

	mutex_enter(&iport->smp_lock);
	/* Block until no other thread holds this iport's SMP slot */
	while (iport->smp_active) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "%s: SMP is active on thread 0x%p, waiting", __func__,
		    (void *)iport->smp_active_thread);
		cv_wait(&iport->smp_cv, &iport->smp_lock);
	}
	/* Claim the slot for the current thread */
	iport->smp_active = B_TRUE;
	iport->smp_active_thread = curthread;
	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: SMP acquired by thread 0x%p", __func__,
	    (void *)iport->smp_active_thread);
	mutex_exit(&iport->smp_lock);
}
7949 
void
pmcs_smp_release(pmcs_iport_t *iport)
{
	if (iport == NULL) {
		return;
	}

	mutex_enter(&iport->smp_lock);
	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: SMP released by thread 0x%p", __func__, (void *)curthread);
	/* Free the slot and wake one waiter in pmcs_smp_acquire() */
	iport->smp_active = B_FALSE;
	iport->smp_active_thread = NULL;
	cv_signal(&iport->smp_cv);
	mutex_exit(&iport->smp_lock);
}
7965 
7966 /*
7967  * Update a PHY's attached-port-pm and target-port-pm properties
7968  *
7969  * phyp: PHY whose properties are to be updated
7970  *
7971  * att_bv: Bit value of the attached-port-pm property to be updated in the
7972  * 64-bit holding area for the PHY.
7973  *
7974  * tgt_bv: Bit value of the target-port-pm property to update in the 64-bit
7975  * holding area for the PHY.
7976  *
7977  * prop_add_val: If TRUE, we're adding bits into the property value.
7978  * Otherwise, we're taking them out.  Either way, the properties for this
7979  * PHY will be updated.
7980  */
void
pmcs_update_phy_pm_props(pmcs_phy_t *phyp, uint64_t att_bv, uint64_t tgt_bv,
    boolean_t prop_add_val)
{
	if (prop_add_val) {
		/*
		 * If the values are currently 0, then we're setting the
		 * phymask for just this PHY as well.
		 */
		if (phyp->att_port_pm_tmp == 0) {
			phyp->att_port_pm = att_bv;
			phyp->tgt_port_pm = tgt_bv;
		}
		phyp->att_port_pm_tmp |= att_bv;
		phyp->tgt_port_pm_tmp |= tgt_bv;
		/* Re-render the property strings from the updated masks */
		(void) snprintf(phyp->att_port_pm_str, PMCS_PM_MAX_NAMELEN,
		    "%"PRIx64, phyp->att_port_pm_tmp);
		(void) snprintf(phyp->tgt_port_pm_str, PMCS_PM_MAX_NAMELEN,
		    "%"PRIx64, phyp->tgt_port_pm_tmp);
	} else {
		/* Clearing bits; an empty mask empties the string too */
		phyp->att_port_pm_tmp &= ~att_bv;
		phyp->tgt_port_pm_tmp &= ~tgt_bv;
		if (phyp->att_port_pm_tmp) {
			(void) snprintf(phyp->att_port_pm_str,
			    PMCS_PM_MAX_NAMELEN, "%"PRIx64,
			    phyp->att_port_pm_tmp);
		} else {
			phyp->att_port_pm_str[0] = '\0';
			phyp->att_port_pm = 0;
		}
		if (phyp->tgt_port_pm_tmp) {
			(void) snprintf(phyp->tgt_port_pm_str,
			    PMCS_PM_MAX_NAMELEN, "%"PRIx64,
			    phyp->tgt_port_pm_tmp);
		} else {
			phyp->tgt_port_pm_str[0] = '\0';
			phyp->tgt_port_pm = 0;
		}
	}

	/* Without a target there are no device properties to push out */
	if (phyp->target == NULL) {
		return;
	}

	/*
	 * Propagate the new strings to every LUN on the target, or to the
	 * SMP device if this target has one instead.
	 */
	mutex_enter(&phyp->target->statlock);
	if (!list_is_empty(&phyp->target->lun_list)) {
		pmcs_lun_t *lunp;

		lunp = list_head(&phyp->target->lun_list);
		while (lunp) {
			(void) scsi_device_prop_update_string(lunp->sd,
			    SCSI_DEVICE_PROP_PATH,
			    SCSI_ADDR_PROP_ATTACHED_PORT_PM,
			    phyp->att_port_pm_str);
			(void) scsi_device_prop_update_string(lunp->sd,
			    SCSI_DEVICE_PROP_PATH,
			    SCSI_ADDR_PROP_TARGET_PORT_PM,
			    phyp->tgt_port_pm_str);
			lunp = list_next(&phyp->target->lun_list, lunp);
		}
	} else if (phyp->target->smpd) {
		(void) smp_device_prop_update_string(phyp->target->smpd,
		    SCSI_ADDR_PROP_ATTACHED_PORT_PM,
		    phyp->att_port_pm_str);
		(void) smp_device_prop_update_string(phyp->target->smpd,
		    SCSI_ADDR_PROP_TARGET_PORT_PM,
		    phyp->tgt_port_pm_str);
	}
	mutex_exit(&phyp->target->statlock);
}
8051 
8052 /* ARGSUSED */
8053 void
8054 pmcs_deregister_device_work(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
8055 {
8056 	pmcs_phy_t	*pptr;
8057 
8058 	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
8059 		pmcs_lock_phy(pptr);
8060 		if (pptr->deregister_wait) {
8061 			pmcs_deregister_device(pwp, pptr);
8062 		}
8063 		pmcs_unlock_phy(pptr);
8064 	}
8065 }
8066 
8067 /*
8068  * pmcs_iport_active
8069  *
8070  * Mark this iport as active.  Called with the iport lock held.
8071  */
static void
pmcs_iport_active(pmcs_iport_t *iport)
{
	ASSERT(mutex_owned(&iport->lock));

	iport->ua_state = UA_ACTIVE;
	/* Reset the SMP serialization state for the (re)activated iport */
	iport->smp_active = B_FALSE;
	iport->smp_active_thread = NULL;
}
8081 
/*
 * Target-map activation callback for this iport.  Re-establishes the
 * target<->PHY linkage (via pmcs_get_target) if needed, and pushes out
 * config_restart_time so discovery isn't restarted while enumeration is
 * still in progress.
 */
/* ARGSUSED */
static void
pmcs_tgtmap_activate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t tgt_type, void **tgt_privp)
{
	pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv;
	pmcs_hw_t *pwp = iport->pwp;
	pmcs_xscsi_t *target;

	/*
	 * Look up the target.  If there is one, and it doesn't have a PHY
	 * pointer, re-establish that linkage here.
	 */
	mutex_enter(&pwp->lock);
	target = pmcs_get_target(iport, tgt_addr, B_FALSE);
	mutex_exit(&pwp->lock);

	/*
	 * If we got a target, it will now have a PHY pointer and the PHY
	 * will point to the target.  The PHY will be locked, so we'll need
	 * to unlock it.
	 */
	if (target) {
		pmcs_unlock_phy(target->phy);
	}

	/*
	 * Update config_restart_time so we don't try to restart discovery
	 * while enumeration is still in progress.
	 */
	mutex_enter(&pwp->config_lock);
	pwp->config_restart_time = ddi_get_lbolt() +
	    drv_usectohz(PMCS_REDISCOVERY_DELAY);
	mutex_exit(&pwp->config_lock);
}
8117 
/*
 * Target-map deactivation callback for this iport.  Returns B_TRUE if
 * discovery should be re-run, either because this PHY merits another
 * enumeration attempt or because some other PHY has already requested a
 * restart.
 */
/* ARGSUSED */
static boolean_t
pmcs_tgtmap_deactivate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t tgt_type, void *tgt_priv,
    scsi_tgtmap_deact_rsn_t tgt_deact_rsn)
{
	pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv;
	pmcs_phy_t *phyp;
	boolean_t rediscover = B_FALSE;

	ASSERT(iport);

	phyp = pmcs_find_phy_by_sas_address(iport->pwp, iport, NULL, tgt_addr);
	if (phyp == NULL) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "%s: Couldn't find PHY at %s", __func__, tgt_addr);
		return (rediscover);
	}
	/* phyp is locked */

	/* A configured PHY being deactivated should be re-enumerated */
	if (!phyp->reenumerate && phyp->configured) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, phyp->target,
		    "%s: PHY @ %s is configured... re-enumerate", __func__,
		    tgt_addr);
		phyp->reenumerate = 1;
	}

	/*
	 * Check to see if reenumerate is set, and if so, if we've reached our
	 * maximum number of retries.
	 */
	if (phyp->reenumerate) {
		if (phyp->enum_attempts == PMCS_MAX_REENUMERATE) {
			pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp,
			    phyp->target,
			    "%s: No more enumeration attempts for %s", __func__,
			    tgt_addr);
		} else {
			pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp,
			    phyp->target, "%s: Re-attempt enumeration for %s",
			    __func__, tgt_addr);
			++phyp->enum_attempts;
			rediscover = B_TRUE;
		}

		phyp->reenumerate = 0;
	}

	pmcs_unlock_phy(phyp);

	/* Push out the restart time and record the restart request */
	mutex_enter(&iport->pwp->config_lock);
	iport->pwp->config_restart_time = ddi_get_lbolt() +
	    drv_usectohz(PMCS_REDISCOVERY_DELAY);
	if (rediscover) {
		iport->pwp->config_restart = B_TRUE;
	} else if (iport->pwp->config_restart == B_TRUE) {
		/*
		 * If we aren't asking for rediscovery because of this PHY,
		 * check to see if we're already asking for it on behalf of
		 * some other PHY.  If so, we'll want to return TRUE, so reset
		 * "rediscover" here.
		 */
		rediscover = B_TRUE;
	}

	mutex_exit(&iport->pwp->config_lock);

	return (rediscover);
}
8187 
8188 void
8189 pmcs_status_disposition(pmcs_phy_t *phyp, uint32_t status)
8190 {
8191 	ASSERT(phyp);
8192 	ASSERT(!mutex_owned(&phyp->phy_lock));
8193 
8194 	if (phyp == NULL) {
8195 		return;
8196 	}
8197 
8198 	pmcs_lock_phy(phyp);
8199 
8200 	/*
8201 	 * XXX: Do we need to call this function from an SSP_EVENT?
8202 	 */
8203 
8204 	switch (status) {
8205 	case PMCOUT_STATUS_NO_DEVICE:
8206 	case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
8207 	case PMCOUT_STATUS_XFER_ERR_BREAK:
8208 	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
8209 	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
8210 	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
8211 	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
8212 	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
8213 	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
8214 	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
8215 	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
8216 	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
8217 	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
8218 	case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
8219 	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
8220 	case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
8221 	case PMCOUT_STATUS_IO_PORT_IN_RESET:
8222 	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
8223 	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
8224 	case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
8225 		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
8226 		    "%s: status = 0x%x for " SAS_ADDR_FMT ", reenumerate",
8227 		    __func__, status, SAS_ADDR_PRT(phyp->sas_address));
8228 		phyp->reenumerate = 1;
8229 		break;
8230 
8231 	default:
8232 		pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
8233 		    "%s: status = 0x%x for " SAS_ADDR_FMT ", no reenumeration",
8234 		    __func__, status, SAS_ADDR_PRT(phyp->sas_address));
8235 		break;
8236 	}
8237 
8238 	pmcs_unlock_phy(phyp);
8239 }
8240 
8241 /*
8242  * Add the list of PHYs pointed to by phyp to the dead_phys_list
8243  *
8244  * Called with all PHYs in the list locked
8245  */
8246 static void
8247 pmcs_add_dead_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
8248 {
8249 	mutex_enter(&pwp->dead_phylist_lock);
8250 	while (phyp) {
8251 		pmcs_phy_t *nxt = phyp->sibling;
8252 		ASSERT(phyp->dead);
8253 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL,
8254 		    "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__,
8255 		    (void *)phyp, phyp->path, phyp->ref_count);
8256 		/*
8257 		 * Put this PHY on the dead PHY list for the watchdog to
8258 		 * clean up after any outstanding work has completed.
8259 		 */
8260 		phyp->dead_next = pwp->dead_phys;
8261 		pwp->dead_phys = phyp;
8262 		pmcs_unlock_phy(phyp);
8263 		phyp = nxt;
8264 	}
8265 	mutex_exit(&pwp->dead_phylist_lock);
8266 }
8267