xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c (revision b18a19c275d2531444fcd2f66664cbe3c6897f6a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  *
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 #include <sys/scsi/adapters/pmcs/pmcs.h>
26 
27 #define	PMCS_DRIVER_VERSION	"pmcs HBA device driver"
28 
29 static	char	*pmcs_driver_rev = PMCS_DRIVER_VERSION;
30 
31 /*
32  * Non-DDI Compliant stuff
33  */
34 extern char hw_serial[];
35 
36 /*
37  * Global driver data
38  */
39 void *pmcs_softc_state = NULL;
40 void *pmcs_iport_softstate = NULL;
41 
42 /*
43  * Tracing and Logging info
44  */
45 pmcs_tbuf_t *pmcs_tbuf = NULL;
46 uint32_t pmcs_tbuf_num_elems = 0;
47 pmcs_tbuf_t *pmcs_tbuf_ptr;
48 uint32_t pmcs_tbuf_idx = 0;
49 boolean_t pmcs_tbuf_wrap = B_FALSE;
50 static kmutex_t pmcs_trace_lock;
51 
52 /*
53  * If pmcs_force_syslog value is non-zero, all messages put in the trace log
54  * will also be sent to system log.
55  */
56 int pmcs_force_syslog = 0;
57 int pmcs_console = 0;
58 
59 /*
60  * External References
61  */
62 extern int ncpus_online;
63 
64 /*
65  * Local static data
66  */
67 static int fwlog_level = 3;
68 static int physpeed = PHY_LINK_ALL;
69 static int phymode = PHY_LM_AUTO;
70 static int block_mask = 0;
71 static int phymap_usec = 3 * MICROSEC;
72 static int iportmap_usec = 2 * MICROSEC;
73 
74 #ifdef DEBUG
75 static int debug_mask = 1;
76 #else
77 static int debug_mask = 0;
78 #endif
79 
80 #ifdef DISABLE_MSIX
81 static int disable_msix = 1;
82 #else
83 static int disable_msix = 0;
84 #endif
85 
86 #ifdef DISABLE_MSI
87 static int disable_msi = 1;
88 #else
89 static int disable_msi = 0;
90 #endif
91 
92 static uint16_t maxqdepth = 0xfffe;
93 
94 /*
95  * Local prototypes
96  */
97 static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
98 static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
99 static int pmcs_unattach(pmcs_hw_t *);
100 static int pmcs_iport_unattach(pmcs_iport_t *);
101 static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
102 static void pmcs_watchdog(void *);
103 static int pmcs_setup_intr(pmcs_hw_t *);
104 static int pmcs_teardown_intr(pmcs_hw_t *);
105 
106 static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
107 static uint_t pmcs_general_ix(caddr_t, caddr_t);
108 static uint_t pmcs_event_ix(caddr_t, caddr_t);
109 static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
110 static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
111 static uint_t pmcs_all_intr(caddr_t, caddr_t);
112 static int pmcs_quiesce(dev_info_t *dip);
113 static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);
114 
115 static void pmcs_create_phy_stats(pmcs_iport_t *);
116 int pmcs_update_phy_stats(kstat_t *, int);
117 static void pmcs_destroy_phy_stats(pmcs_iport_t *);
118 
119 static void pmcs_fm_fini(pmcs_hw_t *pwp);
120 static void pmcs_fm_init(pmcs_hw_t *pwp);
121 static int pmcs_fm_error_cb(dev_info_t *dip,
122     ddi_fm_error_t *err, const void *impl_data);
123 
124 /*
125  * Local configuration data
126  */
/*
 * dev_ops for the pmcs driver.  attach, detach and quiesce are implemented
 * in this file; there are no cb_ops or bus_ops.
 */
static struct dev_ops pmcs_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	pmcs_attach,		/* attach */
	pmcs_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	ddi_power,		/* power management */
	pmcs_quiesce		/* quiesce */
};
141 
/*
 * Module linkage: a single driver module (mod_driverops) wrapping pmcs_ops.
 */
static struct modldrv modldrv = {
	&mod_driverops,
	PMCS_DRIVER_VERSION,
	&pmcs_ops,	/* driver ops */
};
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
150 
/*
 * Default DMA attributes: full 64-bit addressing, a single scatter/gather
 * element and 512-byte granularity.  Copied into the per-purpose attribute
 * structures (iqp/oqp/regdump/cip/fwlog) in pmcs_attach(), which then
 * adjusts alignment as needed.
 */
const ddi_dma_attr_t pmcs_dattr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000078,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	512,				/* dma_attr_granular 	*/
	0				/* dma_attr_flags 	*/
};
165 
/*
 * Default device/register access attributes: little-endian, strictly
 * ordered accesses.  Copied into pwp->dev_acc_attr / pwp->reg_acc_attr
 * during attach.
 */
static ddi_device_acc_attr_t rattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
172 
173 
174 /*
175  * Attach/Detach functions
176  */
177 
178 int
179 _init(void)
180 {
181 	int ret;
182 
183 	ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
184 	if (ret != 0) {
185 		cmn_err(CE_WARN, "?soft state init failed for pmcs");
186 		return (ret);
187 	}
188 
189 	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
190 		cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
191 		ddi_soft_state_fini(&pmcs_softc_state);
192 		return (ret);
193 	}
194 
195 	/*
196 	 * Allocate soft state for iports
197 	 */
198 	ret = ddi_soft_state_init(&pmcs_iport_softstate,
199 	    sizeof (pmcs_iport_t), 2);
200 	if (ret != 0) {
201 		cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
202 		ddi_soft_state_fini(&pmcs_softc_state);
203 		return (ret);
204 	}
205 
206 	ret = mod_install(&modlinkage);
207 	if (ret != 0) {
208 		cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret);
209 		scsi_hba_fini(&modlinkage);
210 		ddi_soft_state_fini(&pmcs_iport_softstate);
211 		ddi_soft_state_fini(&pmcs_softc_state);
212 		return (ret);
213 	}
214 
215 	/* Initialize the global trace lock */
216 	mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL);
217 
218 	return (0);
219 }
220 
221 int
222 _fini(void)
223 {
224 	int ret;
225 	if ((ret = mod_remove(&modlinkage)) != 0) {
226 		return (ret);
227 	}
228 	scsi_hba_fini(&modlinkage);
229 
230 	/* Free pmcs log buffer and destroy the global lock */
231 	if (pmcs_tbuf) {
232 		kmem_free(pmcs_tbuf,
233 		    pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t));
234 		pmcs_tbuf = NULL;
235 	}
236 	mutex_destroy(&pmcs_trace_lock);
237 
238 	ddi_soft_state_fini(&pmcs_iport_softstate);
239 	ddi_soft_state_fini(&pmcs_softc_state);
240 	return (0);
241 }
242 
/*
 * _info(9E): report module information via mod_info().
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
248 
/*
 * Attach handler for an iport (virtual port) child node.
 *
 * Allocates and initializes per-iport soft state keyed by the iport's
 * instance number, links it to the parent HBA's pmcs_hw_t, duplicates the
 * SCSA unit address, configures the phy list if the unit address is active
 * in the HBA's phymap, creates the target soft-state pool and target map,
 * publishes the 'initiator-port' and 'num-phys' DDI properties, creates
 * per-phy kstats, and finally inserts the iport into the HBA's iport list.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after unwinding (via the
 * iport_attach_fail* labels) whatever had been set up.
 */
static int
pmcs_iport_attach(dev_info_t *dip)
{
	pmcs_iport_t		*iport;
	pmcs_hw_t		*pwp;
	scsi_hba_tran_t		*tran;
	void			*ua_priv = NULL;
	char			*iport_ua;
	char			*init_port;
	int			hba_inst;
	int			inst;

	/* The parent HBA node's instance keys the pmcs_hw_t soft state */
	hba_inst = ddi_get_instance(ddi_get_parent(dip));
	inst = ddi_get_instance(dip);

	pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst);
	if (pwp == NULL) {
		/*
		 * NOTE(review): pmcs_prt is passed a NULL pwp here --
		 * confirm pmcs_prt tolerates a NULL handle.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: iport%d attach invoked with NULL parent (HBA) node)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	/* Don't attach new iports while the HBA is tearing down or dead */
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
		return (DDI_FAILURE);
	}

	if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: invoked with NULL unit address, inst (%d)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "Failed to alloc soft state for iport %d", inst);
		return (DDI_FAILURE);
	}

	iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "cannot get iport soft state");
		goto iport_attach_fail1;
	}

	/* Locks/CVs share the HBA's interrupt priority */
	mutex_init(&iport->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));

	/* Set some data on the iport handle */
	iport->dip = dip;
	iport->pwp = pwp;

	/* Dup the UA into the iport handle */
	iport->ua = strdup(iport_ua);

	/* Hang the iport off the tran so SCSA entry points can find it */
	tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
	tran->tran_hba_private = iport;

	list_create(&iport->phys, sizeof (pmcs_phy_t),
	    offsetof(pmcs_phy_t, list_node));

	/*
	 * If our unit address is active in the phymap, configure our
	 * iport's phylist.
	 */
	mutex_enter(&iport->lock);
	ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua);
	if (ua_priv) {
		/* Non-NULL private data indicates the unit address is active */
		iport->ua_state = UA_ACTIVE;
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: failed to "
			    "configure phys on iport handle (0x%p), "
			    " unit address [%s]", __func__,
			    (void *)iport, iport_ua);
			mutex_exit(&iport->lock);
			goto iport_attach_fail2;
		}
	} else {
		iport->ua_state = UA_INACTIVE;
	}
	mutex_exit(&iport->lock);

	/* Allocate string-based soft state pool for targets */
	iport->tgt_sstate = NULL;
	if (ddi_soft_state_bystr_init(&iport->tgt_sstate,
	    sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "cannot get iport tgt soft state");
		goto iport_attach_fail2;
	}

	/* Create this iport's target map */
	if (pmcs_iport_tgtmap_create(iport) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "Failed to create tgtmap on iport %d", inst);
		goto iport_attach_fail3;
	}

	/* Set up the 'initiator-port' DDI property on this iport */
	init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
	if (pwp->separate_ports) {
		/*
		 * NOTE(review): init_port is left zeroed here, so the
		 * property below is set to an empty string in this case.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: separate ports not "
		    "supported", __func__);
	} else {
		/* Set initiator-port value to the HBA's base WWN */
		(void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1,
		    init_port);
	}
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING,
	    SCSI_ADDR_PROP_INITIATOR_PORT, init_port);
	kmem_free(init_port, PMCS_MAX_UA_SIZE);

	/* Set up a 'num-phys' DDI property for the iport node */
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);

	/* Create kstats for each of the phys in this port */
	pmcs_create_phy_stats(iport);

	/*
	 * Insert this iport handle into our list and set
	 * iports_attached on the HBA node.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	ASSERT(!list_link_active(&iport->list_node));
	list_insert_tail(&pwp->iports, iport);
	pwp->iports_attached = 1;
	pwp->num_iports++;
	rw_exit(&pwp->iports_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d attached", inst);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* teardown and fail */
iport_attach_fail3:
	ddi_soft_state_bystr_fini(&iport->tgt_sstate);
iport_attach_fail2:
	list_destroy(&iport->phys);
	strfree(iport->ua);
	mutex_destroy(&iport->refcnt_lock);
	cv_destroy(&iport->refcnt_cv);
	mutex_destroy(&iport->lock);
iport_attach_fail1:
	ddi_soft_state_free(pmcs_iport_softstate, inst);
	return (DDI_FAILURE);
}
402 
403 static int
404 pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
405 {
406 	scsi_hba_tran_t *tran;
407 	char chiprev, *fwsupport, hw_rev[24], fw_rev[24];
408 	off_t set3size;
409 	int inst, i;
410 	int sm_hba = 1;
411 	int protocol = 0;
412 	int num_phys = 0;
413 	pmcs_hw_t *pwp;
414 	pmcs_phy_t *phyp;
415 	uint32_t num_threads;
416 	char buf[64];
417 
418 	switch (cmd) {
419 	case DDI_ATTACH:
420 		break;
421 
422 	case DDI_PM_RESUME:
423 	case DDI_RESUME:
424 		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
425 		if (!tran) {
426 			return (DDI_FAILURE);
427 		}
428 		/* No DDI_?_RESUME on iport nodes */
429 		if (scsi_hba_iport_unit_address(dip) != NULL) {
430 			return (DDI_SUCCESS);
431 		}
432 		pwp = TRAN2PMC(tran);
433 		if (pwp == NULL) {
434 			return (DDI_FAILURE);
435 		}
436 
437 		mutex_enter(&pwp->lock);
438 		pwp->suspended = 0;
439 		if (pwp->tq) {
440 			ddi_taskq_resume(pwp->tq);
441 		}
442 		mutex_exit(&pwp->lock);
443 		return (DDI_SUCCESS);
444 
445 	default:
446 		return (DDI_FAILURE);
447 	}
448 
449 	/*
450 	 * If this is an iport node, invoke iport attach.
451 	 */
452 	if (scsi_hba_iport_unit_address(dip) != NULL) {
453 		return (pmcs_iport_attach(dip));
454 	}
455 
456 	/*
457 	 * From here on is attach for the HBA node
458 	 */
459 
460 #ifdef	DEBUG
461 	/*
462 	 * Check to see if this unit is to be disabled.  We can't disable
463 	 * on a per-iport node.  It's either the entire HBA or nothing.
464 	 */
465 	(void) snprintf(buf, sizeof (buf),
466 	    "disable-instance-%d", ddi_get_instance(dip));
467 	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
468 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) {
469 		cmn_err(CE_NOTE, "pmcs%d: disabled by configuration",
470 		    ddi_get_instance(dip));
471 		return (DDI_FAILURE);
472 	}
473 #endif
474 
475 	/*
476 	 * Allocate softstate
477 	 */
478 	inst = ddi_get_instance(dip);
479 	if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) {
480 		cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst);
481 		return (DDI_FAILURE);
482 	}
483 
484 	pwp = ddi_get_soft_state(pmcs_softc_state, inst);
485 	if (pwp == NULL) {
486 		cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst);
487 		ddi_soft_state_free(pmcs_softc_state, inst);
488 		return (DDI_FAILURE);
489 	}
490 	pwp->dip = dip;
491 	STAILQ_INIT(&pwp->dq);
492 	STAILQ_INIT(&pwp->cq);
493 	STAILQ_INIT(&pwp->wf);
494 	STAILQ_INIT(&pwp->pf);
495 	/*
496 	 * Create the list for iports
497 	 */
498 	list_create(&pwp->iports, sizeof (pmcs_iport_t),
499 	    offsetof(pmcs_iport_t, list_node));
500 
501 	pwp->state = STATE_PROBING;
502 
503 	/*
504 	 * Get driver.conf properties
505 	 */
506 	pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
507 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask",
508 	    debug_mask);
509 	pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
510 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask",
511 	    block_mask);
512 	pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
513 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed);
514 	pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
515 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode);
516 	pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
517 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level);
518 	if (pwp->fwlog > PMCS_FWLOG_MAX) {
519 		pwp->fwlog = PMCS_FWLOG_MAX;
520 	}
521 
522 	mutex_enter(&pmcs_trace_lock);
523 	if (pmcs_tbuf == NULL) {
524 		/* Allocate trace buffer */
525 		pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
526 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems",
527 		    PMCS_TBUF_NUM_ELEMS_DEF);
528 		if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) ||
529 		    (pmcs_tbuf_num_elems == 0)) {
530 			pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF;
531 		}
532 
533 		pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems *
534 		    sizeof (pmcs_tbuf_t), KM_SLEEP);
535 		pmcs_tbuf_ptr = pmcs_tbuf;
536 		pmcs_tbuf_idx = 0;
537 	}
538 	mutex_exit(&pmcs_trace_lock);
539 
540 	disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
541 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix",
542 	    disable_msix);
543 	disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
544 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi",
545 	    disable_msi);
546 	maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
547 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth);
548 	pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
549 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0);
550 	if (pwp->fw_force_update == 0) {
551 		pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
552 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
553 		    "pmcs-fw-disable-update", 0);
554 	}
555 	pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
556 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries",
557 	    PMCS_NQENTRY);
558 
559 	/*
560 	 * Initialize FMA
561 	 */
562 	pwp->dev_acc_attr = pwp->reg_acc_attr = rattr;
563 	pwp->iqp_dma_attr = pwp->oqp_dma_attr =
564 	    pwp->regdump_dma_attr = pwp->cip_dma_attr =
565 	    pwp->fwlog_dma_attr = pmcs_dattr;
566 	pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip,
567 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
568 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
569 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
570 	pmcs_fm_init(pwp);
571 
572 	/*
573 	 * Map registers
574 	 */
575 	if (pci_config_setup(dip, &pwp->pci_acc_handle)) {
576 		pmcs_prt(pwp, PMCS_PRT_WARN, "pci config setup failed");
577 		ddi_soft_state_free(pmcs_softc_state, inst);
578 		return (DDI_FAILURE);
579 	}
580 
581 	/*
582 	 * Get the size of register set 3.
583 	 */
584 	if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) {
585 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
586 		    "unable to get size of register set %d", PMCS_REGSET_3);
587 		pci_config_teardown(&pwp->pci_acc_handle);
588 		ddi_soft_state_free(pmcs_softc_state, inst);
589 		return (DDI_FAILURE);
590 	}
591 
592 	/*
593 	 * Map registers
594 	 */
595 	pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
596 
597 	if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs,
598 	    0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) {
599 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
600 		    "failed to map Message Unit registers");
601 		pci_config_teardown(&pwp->pci_acc_handle);
602 		ddi_soft_state_free(pmcs_softc_state, inst);
603 		return (DDI_FAILURE);
604 	}
605 
606 	if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs,
607 	    0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) {
608 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map TOP registers");
609 		ddi_regs_map_free(&pwp->msg_acc_handle);
610 		pci_config_teardown(&pwp->pci_acc_handle);
611 		ddi_soft_state_free(pmcs_softc_state, inst);
612 		return (DDI_FAILURE);
613 	}
614 
615 	if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs,
616 	    0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) {
617 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map GSM registers");
618 		ddi_regs_map_free(&pwp->top_acc_handle);
619 		ddi_regs_map_free(&pwp->msg_acc_handle);
620 		pci_config_teardown(&pwp->pci_acc_handle);
621 		ddi_soft_state_free(pmcs_softc_state, inst);
622 		return (DDI_FAILURE);
623 	}
624 
625 	if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs,
626 	    0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) {
627 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map MPI registers");
628 		ddi_regs_map_free(&pwp->top_acc_handle);
629 		ddi_regs_map_free(&pwp->gsm_acc_handle);
630 		ddi_regs_map_free(&pwp->msg_acc_handle);
631 		pci_config_teardown(&pwp->pci_acc_handle);
632 		ddi_soft_state_free(pmcs_softc_state, inst);
633 		return (DDI_FAILURE);
634 	}
635 	pwp->mpibar =
636 	    (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size;
637 
638 	/*
639 	 * Make sure we can support this card.
640 	 */
641 	pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION);
642 
643 	switch (pwp->chiprev) {
644 	case PMCS_PM8001_REV_A:
645 	case PMCS_PM8001_REV_B:
646 		pmcs_prt(pwp, PMCS_PRT_ERR,
647 		    "Rev A/B Card no longer supported");
648 		goto failure;
649 	case PMCS_PM8001_REV_C:
650 		break;
651 	default:
652 		pmcs_prt(pwp, PMCS_PRT_ERR,
653 		    "Unknown chip revision (%d)", pwp->chiprev);
654 		goto failure;
655 	}
656 
657 	/*
658 	 * Allocate DMA addressable area for Inbound and Outbound Queue indices
659 	 * that the chip needs to access plus a space for scratch usage
660 	 */
661 	pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
662 	if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls,
663 	    &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip,
664 	    &pwp->ciaddr) == B_FALSE) {
665 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
666 		    "Failed to setup DMA for index/scratch");
667 		goto failure;
668 	}
669 
670 	bzero(pwp->cip, ptob(1));
671 	pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE];
672 	pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE;
673 
674 	/*
675 	 * Allocate DMA S/G list chunks
676 	 */
677 	(void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES);
678 
679 	/*
680 	 * Allocate a DMA addressable area for the firmware log (if needed)
681 	 */
682 	if (pwp->fwlog) {
683 		/*
684 		 * Align to event log header and entry size
685 		 */
686 		pwp->fwlog_dma_attr.dma_attr_align = 32;
687 		if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr,
688 		    &pwp->fwlog_acchdl,
689 		    &pwp->fwlog_hndl, PMCS_FWLOG_SIZE,
690 		    (caddr_t *)&pwp->fwlogp,
691 		    &pwp->fwaddr) == B_FALSE) {
692 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
693 			    "Failed to setup DMA for fwlog area");
694 			pwp->fwlog = 0;
695 		} else {
696 			bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
697 		}
698 	}
699 
700 	if (pwp->flash_chunk_addr == NULL) {
701 		pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE;
702 		if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr,
703 		    &pwp->regdump_acchdl,
704 		    &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE,
705 		    (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) ==
706 		    B_FALSE) {
707 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
708 			    "Failed to setup DMA for register dump area");
709 			goto failure;
710 		}
711 		bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE);
712 	}
713 
714 	/*
715 	 * More bits of local initialization...
716 	 */
717 	pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0);
718 	if (pwp->tq == NULL) {
719 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to create worker taskq");
720 		goto failure;
721 	}
722 
723 	/*
724 	 * Cache of structures for dealing with I/O completion callbacks.
725 	 */
726 	(void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst);
727 	pwp->iocomp_cb_cache = kmem_cache_create(buf,
728 	    sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0);
729 
730 	/*
731 	 * Cache of PHY structures
732 	 */
733 	(void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst);
734 	pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8,
735 	    pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp,
736 	    NULL, 0);
737 
738 	/*
739 	 * Allocate space for the I/O completion threads
740 	 */
741 	num_threads = ncpus_online;
742 	if (num_threads > PMCS_MAX_CQ_THREADS) {
743 		num_threads = PMCS_MAX_CQ_THREADS;
744 	}
745 
746 	pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) *
747 	    num_threads, KM_SLEEP);
748 	pwp->cq_info.cq_threads = num_threads;
749 	pwp->cq_info.cq_next_disp_thr = 0;
750 	pwp->cq_info.cq_stop = B_FALSE;
751 
752 	/*
753 	 * Set the quantum value in clock ticks for the I/O interrupt
754 	 * coalescing timer.
755 	 */
756 	pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS);
757 
758 	/*
759 	 * We have a delicate dance here. We need to set up
760 	 * interrupts so we know how to set up some OQC
761 	 * tables. However, while we're setting up table
762 	 * access, we may need to flash new firmware and
763 	 * reset the card, which will take some finessing.
764 	 */
765 
766 	/*
767 	 * Set up interrupts here.
768 	 */
769 	switch (pmcs_setup_intr(pwp)) {
770 	case 0:
771 		break;
772 	case EIO:
773 		pwp->stuck = 1;
774 		/* FALLTHROUGH */
775 	default:
776 		goto failure;
777 	}
778 
779 	/*
780 	 * Set these up now becuase they are used to initialize the OQC tables.
781 	 *
782 	 * If we have MSI or MSI-X interrupts set up and we have enough
783 	 * vectors for each OQ, the Outbound Queue vectors can all be the
784 	 * same as the appropriate interrupt routine will have been called
785 	 * and the doorbell register automatically cleared.
786 	 * This keeps us from having to check the Outbound Doorbell register
787 	 * when the routines for these interrupts are called.
788 	 *
789 	 * If we have Legacy INT-X interrupts set up or we didn't have enough
790 	 * MSI/MSI-X vectors to uniquely identify each OQ, we point these
791 	 * vectors to the bits we would like to have set in the Outbound
792 	 * Doorbell register because pmcs_all_intr will read the doorbell
793 	 * register to find out why we have an interrupt and write the
794 	 * corresponding 'clear' bit for that interrupt.
795 	 */
796 
797 	switch (pwp->intr_cnt) {
798 	case 1:
799 		/*
800 		 * Only one vector, so we must check all OQs for MSI.  For
801 		 * INT-X, there's only one vector anyway, so we can just
802 		 * use the outbound queue bits to keep from having to
803 		 * check each queue for each interrupt.
804 		 */
805 		if (pwp->int_type == PMCS_INT_FIXED) {
806 			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
807 			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
808 			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
809 		} else {
810 			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
811 			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE;
812 			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE;
813 		}
814 		break;
815 	case 2:
816 		/* With 2, we can at least isolate IODONE */
817 		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
818 		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
819 		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL;
820 		break;
821 	case 4:
822 		/* With 4 vectors, everybody gets one */
823 		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
824 		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
825 		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
826 		break;
827 	}
828 
829 	/*
830 	 * Do the first part of setup
831 	 */
832 	if (pmcs_setup(pwp)) {
833 		goto failure;
834 	}
835 	pmcs_report_fwversion(pwp);
836 
837 	/*
838 	 * Now do some additonal allocations based upon information
839 	 * gathered during MPI setup.
840 	 */
841 	pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP);
842 	ASSERT(pwp->nphy < SAS2_PHYNUM_MAX);
843 	phyp = pwp->root_phys;
844 	for (i = 0; i < pwp->nphy; i++) {
845 		if (i < pwp->nphy-1) {
846 			phyp->sibling = (phyp + 1);
847 		}
848 		mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
849 		    DDI_INTR_PRI(pwp->intr_pri));
850 		phyp->phynum = i & SAS2_PHYNUM_MASK;
851 		pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path));
852 		phyp->pwp = pwp;
853 		phyp->device_id = PMCS_INVALID_DEVICE_ID;
854 		phyp++;
855 	}
856 
857 	pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP);
858 	for (i = 0; i < pwp->max_cmd - 1; i++) {
859 		pmcwork_t *pwrk = &pwp->work[i];
860 		mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER,
861 		    DDI_INTR_PRI(pwp->intr_pri));
862 		cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL);
863 		STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);
864 
865 	}
866 	pwp->targets = (pmcs_xscsi_t **)
867 	    kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP);
868 
869 	pwp->iqpt = (pmcs_iqp_trace_t *)
870 	    kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP);
871 	pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP);
872 	pwp->iqpt->curpos = pwp->iqpt->head;
873 	pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;
874 
875 	/*
876 	 * Start MPI communication.
877 	 */
878 	if (pmcs_start_mpi(pwp)) {
879 		if (pmcs_soft_reset(pwp, B_FALSE)) {
880 			goto failure;
881 		}
882 	}
883 
884 	/*
885 	 * Do some initial acceptance tests.
886 	 * This tests interrupts and queues.
887 	 */
888 	if (pmcs_echo_test(pwp)) {
889 		goto failure;
890 	}
891 
892 	/* Read VPD - if it exists */
893 	if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) {
894 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Unable to read VPD: "
895 		    "attempting to fabricate", __func__);
896 		/*
897 		 * When we release, this must goto failure and the call
898 		 * to pmcs_fabricate_wwid is removed.
899 		 */
900 		/* goto failure; */
901 		if (!pmcs_fabricate_wwid(pwp)) {
902 			goto failure;
903 		}
904 	}
905 
906 	/*
907 	 * We're now officially running
908 	 */
909 	pwp->state = STATE_RUNNING;
910 
911 	/*
912 	 * Check firmware versions and load new firmware
913 	 * if needed and reset.
914 	 */
915 	if (pmcs_firmware_update(pwp)) {
916 		pmcs_prt(pwp, PMCS_PRT_WARN, "%s: Firmware update failed",
917 		    __func__);
918 		goto failure;
919 	}
920 
921 	/*
922 	 * Create completion threads.
923 	 */
924 	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
925 		pwp->cq_info.cq_thr_info[i].cq_pwp = pwp;
926 		pwp->cq_info.cq_thr_info[i].cq_thread =
927 		    thread_create(NULL, 0, pmcs_scsa_cq_run,
928 		    &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri);
929 	}
930 
931 	/*
932 	 * Create one thread to deal with the updating of the interrupt
933 	 * coalescing timer.
934 	 */
935 	pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal,
936 	    pwp, 0, &p0, TS_RUN, minclsyspri);
937 
938 	/*
939 	 * Kick off the watchdog
940 	 */
941 	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
942 	    drv_usectohz(PMCS_WATCH_INTERVAL));
943 	/*
944 	 * Do the SCSI attachment code (before starting phys)
945 	 */
946 	if (pmcs_scsa_init(pwp, &pmcs_dattr)) {
947 		goto failure;
948 	}
949 	pwp->hba_attached = 1;
950 
951 	/*
952 	 * Initialize the rwlock for the iport elements.
953 	 */
954 	rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL);
955 
956 	/* Check all acc & dma handles allocated in attach */
957 	if (pmcs_check_acc_dma_handle(pwp)) {
958 		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
959 		goto failure;
960 	}
961 
962 	/*
963 	 * Create the phymap for this HBA instance
964 	 */
965 	if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL,
966 	    pwp, pmcs_phymap_activate, pmcs_phymap_deactivate,
967 	    &pwp->hss_phymap) != DDI_SUCCESS) {
968 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: pmcs%d phymap_create failed",
969 		    __func__, inst);
970 		goto failure;
971 	}
972 	ASSERT(pwp->hss_phymap);
973 
974 	/*
975 	 * Create the iportmap for this HBA instance
976 	 */
977 	if (scsi_hba_iportmap_create(dip, iportmap_usec, pwp->nphy,
978 	    &pwp->hss_iportmap) != DDI_SUCCESS) {
979 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: pmcs%d iportmap_create "
980 		    "failed", __func__, inst);
981 		goto failure;
982 	}
983 	ASSERT(pwp->hss_iportmap);
984 
985 	/*
986 	 * Start the PHYs.
987 	 */
988 	if (pmcs_start_phys(pwp)) {
989 		goto failure;
990 	}
991 
992 	/*
993 	 * From this point on, we can't fail.
994 	 */
995 	ddi_report_dev(dip);
996 
997 	/* SM-HBA */
998 	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
999 	    &sm_hba);
1000 
1001 	/* SM-HBA */
1002 	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
1003 	    pmcs_driver_rev);
1004 
1005 	/* SM-HBA */
1006 	chiprev = 'A' + pwp->chiprev;
1007 	(void) snprintf(hw_rev, 2, "%s", &chiprev);
1008 	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
1009 	    hw_rev);
1010 
1011 	/* SM-HBA */
1012 	switch (PMCS_FW_TYPE(pwp)) {
1013 	case PMCS_FW_TYPE_RELEASED:
1014 		fwsupport = "Released";
1015 		break;
1016 	case PMCS_FW_TYPE_DEVELOPMENT:
1017 		fwsupport = "Development";
1018 		break;
1019 	case PMCS_FW_TYPE_ALPHA:
1020 		fwsupport = "Alpha";
1021 		break;
1022 	case PMCS_FW_TYPE_BETA:
1023 		fwsupport = "Beta";
1024 		break;
1025 	default:
1026 		fwsupport = "Special";
1027 		break;
1028 	}
1029 	(void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
1030 	    PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
1031 	    fwsupport);
1032 	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
1033 	    fw_rev);
1034 
1035 	/* SM-HBA */
1036 	num_phys = pwp->nphy;
1037 	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
1038 	    &num_phys);
1039 
1040 	/* SM-HBA */
1041 	protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
1042 	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
1043 	    &protocol);
1044 
1045 	return (DDI_SUCCESS);
1046 
1047 failure:
1048 	if (pmcs_unattach(pwp)) {
1049 		pwp->stuck = 1;
1050 	}
1051 	return (DDI_FAILURE);
1052 }
1053 
1054 int
1055 pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1056 {
1057 	int inst = ddi_get_instance(dip);
1058 	pmcs_iport_t	*iport = NULL;
1059 	pmcs_hw_t	*pwp = NULL;
1060 	scsi_hba_tran_t	*tran;
1061 
1062 	if (scsi_hba_iport_unit_address(dip) != NULL) {
1063 		/* iport node */
1064 		iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
1065 		ASSERT(iport);
1066 		if (iport == NULL) {
1067 			return (DDI_FAILURE);
1068 		}
1069 		pwp = iport->pwp;
1070 	} else {
1071 		/* hba node */
1072 		pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst);
1073 		ASSERT(pwp);
1074 		if (pwp == NULL) {
1075 			return (DDI_FAILURE);
1076 		}
1077 	}
1078 
1079 	switch (cmd) {
1080 	case DDI_DETACH:
1081 		if (iport) {
1082 			/* iport detach */
1083 			if (pmcs_iport_unattach(iport)) {
1084 				return (DDI_FAILURE);
1085 			}
1086 			pmcs_prt(pwp, PMCS_PRT_DEBUG, "iport%d detached", inst);
1087 			return (DDI_SUCCESS);
1088 		} else {
1089 			/* HBA detach */
1090 			if (pmcs_unattach(pwp)) {
1091 				return (DDI_FAILURE);
1092 			}
1093 			return (DDI_SUCCESS);
1094 		}
1095 
1096 	case DDI_SUSPEND:
1097 	case DDI_PM_SUSPEND:
1098 		/* No DDI_SUSPEND on iport nodes */
1099 		if (iport) {
1100 			return (DDI_SUCCESS);
1101 		}
1102 
1103 		if (pwp->stuck) {
1104 			return (DDI_FAILURE);
1105 		}
1106 		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
1107 		if (!tran) {
1108 			return (DDI_FAILURE);
1109 		}
1110 
1111 		pwp = TRAN2PMC(tran);
1112 		if (pwp == NULL) {
1113 			return (DDI_FAILURE);
1114 		}
1115 		mutex_enter(&pwp->lock);
1116 		if (pwp->tq) {
1117 			ddi_taskq_suspend(pwp->tq);
1118 		}
1119 		pwp->suspended = 1;
1120 		mutex_exit(&pwp->lock);
1121 		pmcs_prt(pwp, PMCS_PRT_INFO, "PMC8X6G suspending");
1122 		return (DDI_SUCCESS);
1123 
1124 	default:
1125 		return (DDI_FAILURE);
1126 	}
1127 }
1128 
/*
 * Tear down a single iport instance.  Fails if the iport still has
 * configured targets or if its unit address is still active in the
 * phymap.  Otherwise removes the iport from the HBA's list, empties
 * its PHY list, waits for all outstanding references to drain, and
 * frees every resource associated with the iport, including its soft
 * state.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
pmcs_iport_unattach(pmcs_iport_t *iport)
{
	pmcs_hw_t	*pwp = iport->pwp;

	/*
	 * First, check if there are still any configured targets on this
	 * iport.  If so, we fail detach.
	 */
	if (pmcs_iport_has_targets(pwp, iport)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d detach failure: "
		    "iport has targets (luns)", ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/*
	 * Remove this iport from our list if it is inactive in the phymap.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	mutex_enter(&iport->lock);

	if (iport->ua_state == UA_ACTIVE) {
		/* Still live in the phymap; detach must be refused */
		mutex_exit(&iport->lock);
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d detach failure: "
		    "iport unit address active in phymap",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/* If it's our only iport, clear iports_attached */
	ASSERT(pwp->num_iports >= 1);
	if (--pwp->num_iports == 0) {
		pwp->iports_attached = 0;
	}

	ASSERT(list_link_active(&iport->list_node));
	list_remove(&pwp->iports, iport);
	rw_exit(&pwp->iports_lock);

	/*
	 * We have removed the iport handle from the HBA's iports list,
	 * there will be no new references to it. Two things must be
	 * guarded against here.  First, we could have PHY up events,
	 * adding themselves to the iport->phys list and grabbing ref's
	 * on our iport handle.  Second, we could have existing references
	 * to this iport handle from a point in time prior to the list
	 * removal above.
	 *
	 * So first, destroy the phys list. Remove any phys that have snuck
	 * in after the phymap deactivate, dropping the refcnt accordingly.
	 * If these PHYs are still up if and when the phymap reactivates
	 * (i.e. when this iport reattaches), we'll populate the list with
	 * them and bump the refcnt back up.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	ASSERT(list_is_empty(&iport->phys));
	list_destroy(&iport->phys);
	mutex_exit(&iport->lock);

	/*
	 * Second, wait for any other references to this iport to be
	 * dropped, then continue teardown.
	 */
	mutex_enter(&iport->refcnt_lock);
	while (iport->refcnt != 0) {
		cv_wait(&iport->refcnt_cv, &iport->refcnt_lock);
	}
	mutex_exit(&iport->refcnt_lock);

	/* Delete kstats */
	pmcs_destroy_phy_stats(iport);

	/*
	 * Destroy the iport target map.
	 *
	 * NOTE(review): failing here returns DDI_FAILURE after the iport
	 * has already been unlinked from pwp->iports and its phys list
	 * destroyed, leaving it half torn down -- confirm whether a
	 * subsequent detach retry can cope with that state.
	 */
	if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) {
		return (DDI_FAILURE);
	}

	/* Free the tgt soft state */
	if (iport->tgt_sstate != NULL) {
		ddi_soft_state_bystr_fini(&iport->tgt_sstate);
	}

	/* Free our unit address string */
	strfree(iport->ua);

	/* Finish teardown and free the softstate */
	mutex_destroy(&iport->refcnt_lock);
	ASSERT(iport->refcnt == 0);
	cv_destroy(&iport->refcnt_cv);
	mutex_destroy(&iport->lock);
	ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip));

	return (DDI_SUCCESS);
}
1224 
1225 static int
1226 pmcs_unattach(pmcs_hw_t *pwp)
1227 {
1228 	int i;
1229 	enum pwpstate curstate;
1230 	pmcs_cq_thr_info_t *cqti;
1231 
1232 	/*
1233 	 * Tear down the interrupt infrastructure.
1234 	 */
1235 	if (pmcs_teardown_intr(pwp)) {
1236 		pwp->stuck = 1;
1237 	}
1238 	pwp->intr_cnt = 0;
1239 
1240 	/*
1241 	 * Grab a lock, if initted, to set state.
1242 	 */
1243 	if (pwp->locks_initted) {
1244 		mutex_enter(&pwp->lock);
1245 		if (pwp->state != STATE_DEAD) {
1246 			pwp->state = STATE_UNPROBING;
1247 		}
1248 		curstate = pwp->state;
1249 		mutex_exit(&pwp->lock);
1250 
1251 		/*
1252 		 * Stop the I/O completion threads.
1253 		 */
1254 		mutex_enter(&pwp->cq_lock);
1255 		pwp->cq_info.cq_stop = B_TRUE;
1256 		for (i = 0; i < pwp->cq_info.cq_threads; i++) {
1257 			if (pwp->cq_info.cq_thr_info[i].cq_thread) {
1258 				cqti = &pwp->cq_info.cq_thr_info[i];
1259 				mutex_enter(&cqti->cq_thr_lock);
1260 				cv_signal(&cqti->cq_cv);
1261 				mutex_exit(&cqti->cq_thr_lock);
1262 				mutex_exit(&pwp->cq_lock);
1263 				thread_join(cqti->cq_thread->t_did);
1264 				mutex_enter(&pwp->cq_lock);
1265 			}
1266 		}
1267 		mutex_exit(&pwp->cq_lock);
1268 
1269 		/*
1270 		 * Stop the interrupt coalescing timer thread
1271 		 */
1272 		if (pwp->ict_thread) {
1273 			mutex_enter(&pwp->ict_lock);
1274 			pwp->io_intr_coal.stop_thread = B_TRUE;
1275 			cv_signal(&pwp->ict_cv);
1276 			mutex_exit(&pwp->ict_lock);
1277 			thread_join(pwp->ict_thread->t_did);
1278 		}
1279 	} else {
1280 		if (pwp->state != STATE_DEAD) {
1281 			pwp->state = STATE_UNPROBING;
1282 		}
1283 		curstate = pwp->state;
1284 	}
1285 
1286 	if (&pwp->iports != NULL) {
1287 		/* Destroy the iports lock */
1288 		rw_destroy(&pwp->iports_lock);
1289 		/* Destroy the iports list */
1290 		ASSERT(list_is_empty(&pwp->iports));
1291 		list_destroy(&pwp->iports);
1292 	}
1293 
1294 	if (pwp->hss_iportmap != NULL) {
1295 		/* Destroy the iportmap */
1296 		scsi_hba_iportmap_destroy(pwp->hss_iportmap);
1297 	}
1298 
1299 	if (pwp->hss_phymap != NULL) {
1300 		/* Destroy the phymap */
1301 		sas_phymap_destroy(pwp->hss_phymap);
1302 	}
1303 
1304 	/*
1305 	 * Make sure that any pending watchdog won't
1306 	 * be called from this point on out.
1307 	 */
1308 	(void) untimeout(pwp->wdhandle);
1309 	/*
1310 	 * After the above action, the watchdog
1311 	 * timer that starts up the worker task
1312 	 * may trigger but will exit immediately
1313 	 * on triggering.
1314 	 *
1315 	 * Now that this is done, we can destroy
1316 	 * the task queue, which will wait if we're
1317 	 * running something on it.
1318 	 */
1319 	if (pwp->tq) {
1320 		ddi_taskq_destroy(pwp->tq);
1321 		pwp->tq = NULL;
1322 	}
1323 
1324 	pmcs_fm_fini(pwp);
1325 
1326 	if (pwp->hba_attached) {
1327 		(void) scsi_hba_detach(pwp->dip);
1328 		pwp->hba_attached = 0;
1329 	}
1330 
1331 	/*
1332 	 * If the chip hasn't been marked dead, shut it down now
1333 	 * to bring it back to a known state without attempting
1334 	 * a soft reset.
1335 	 */
1336 	if (curstate != STATE_DEAD && pwp->locks_initted) {
1337 		/*
1338 		 * De-register all registered devices
1339 		 */
1340 		pmcs_deregister_devices(pwp, pwp->root_phys);
1341 
1342 		/*
1343 		 * Stop all the phys.
1344 		 */
1345 		pmcs_stop_phys(pwp);
1346 
1347 		/*
1348 		 * Shut Down Message Passing
1349 		 */
1350 		(void) pmcs_stop_mpi(pwp);
1351 
1352 		/*
1353 		 * Reset chip
1354 		 */
1355 		(void) pmcs_soft_reset(pwp, B_FALSE);
1356 	}
1357 
1358 	/*
1359 	 * Turn off interrupts on the chip
1360 	 */
1361 	if (pwp->mpi_acc_handle) {
1362 		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
1363 	}
1364 
1365 	/* Destroy pwp's lock */
1366 	if (pwp->locks_initted) {
1367 		mutex_destroy(&pwp->lock);
1368 		mutex_destroy(&pwp->dma_lock);
1369 		mutex_destroy(&pwp->axil_lock);
1370 		mutex_destroy(&pwp->cq_lock);
1371 		mutex_destroy(&pwp->config_lock);
1372 		mutex_destroy(&pwp->ict_lock);
1373 		mutex_destroy(&pwp->wfree_lock);
1374 		mutex_destroy(&pwp->pfree_lock);
1375 		mutex_destroy(&pwp->dead_phylist_lock);
1376 #ifdef	DEBUG
1377 		mutex_destroy(&pwp->dbglock);
1378 #endif
1379 		cv_destroy(&pwp->ict_cv);
1380 		cv_destroy(&pwp->drain_cv);
1381 		pwp->locks_initted = 0;
1382 	}
1383 
1384 	/*
1385 	 * Free DMA handles and associated consistent memory
1386 	 */
1387 	if (pwp->regdump_hndl) {
1388 		if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) {
1389 			pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed "
1390 			    "at %s():%d", __func__, __LINE__);
1391 		}
1392 		ddi_dma_free_handle(&pwp->regdump_hndl);
1393 		ddi_dma_mem_free(&pwp->regdump_acchdl);
1394 		pwp->regdump_hndl = 0;
1395 	}
1396 	if (pwp->fwlog_hndl) {
1397 		if (ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) {
1398 			pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed "
1399 			    "at %s():%d", __func__, __LINE__);
1400 		}
1401 		ddi_dma_free_handle(&pwp->fwlog_hndl);
1402 		ddi_dma_mem_free(&pwp->fwlog_acchdl);
1403 		pwp->fwlog_hndl = 0;
1404 	}
1405 	if (pwp->cip_handles) {
1406 		if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) {
1407 			pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed "
1408 			    "at %s():%d", __func__, __LINE__);
1409 		}
1410 		ddi_dma_free_handle(&pwp->cip_handles);
1411 		ddi_dma_mem_free(&pwp->cip_acchdls);
1412 		pwp->cip_handles = 0;
1413 	}
1414 	for (i = 0; i < PMCS_NOQ; i++) {
1415 		if (pwp->oqp_handles[i]) {
1416 			if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) !=
1417 			    DDI_SUCCESS) {
1418 				pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check "
1419 				    "failed at %s():%d", __func__, __LINE__);
1420 			}
1421 			ddi_dma_free_handle(&pwp->oqp_handles[i]);
1422 			ddi_dma_mem_free(&pwp->oqp_acchdls[i]);
1423 			pwp->oqp_handles[i] = 0;
1424 		}
1425 	}
1426 	for (i = 0; i < PMCS_NIQ; i++) {
1427 		if (pwp->iqp_handles[i]) {
1428 			if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) !=
1429 			    DDI_SUCCESS) {
1430 				pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check "
1431 				    "failed at %s():%d", __func__, __LINE__);
1432 			}
1433 			ddi_dma_free_handle(&pwp->iqp_handles[i]);
1434 			ddi_dma_mem_free(&pwp->iqp_acchdls[i]);
1435 			pwp->iqp_handles[i] = 0;
1436 		}
1437 	}
1438 
1439 	pmcs_free_dma_chunklist(pwp);
1440 
1441 	/*
1442 	 * Unmap registers and destroy access handles
1443 	 */
1444 	if (pwp->mpi_acc_handle) {
1445 		ddi_regs_map_free(&pwp->mpi_acc_handle);
1446 		pwp->mpi_acc_handle = 0;
1447 	}
1448 	if (pwp->top_acc_handle) {
1449 		ddi_regs_map_free(&pwp->top_acc_handle);
1450 		pwp->top_acc_handle = 0;
1451 	}
1452 	if (pwp->gsm_acc_handle) {
1453 		ddi_regs_map_free(&pwp->gsm_acc_handle);
1454 		pwp->gsm_acc_handle = 0;
1455 	}
1456 	if (pwp->msg_acc_handle) {
1457 		ddi_regs_map_free(&pwp->msg_acc_handle);
1458 		pwp->msg_acc_handle = 0;
1459 	}
1460 	if (pwp->pci_acc_handle) {
1461 		pci_config_teardown(&pwp->pci_acc_handle);
1462 		pwp->pci_acc_handle = 0;
1463 	}
1464 
1465 	/*
1466 	 * Do memory allocation cleanup.
1467 	 */
1468 	while (pwp->dma_freelist) {
1469 		pmcs_dmachunk_t *this = pwp->dma_freelist;
1470 		pwp->dma_freelist = this->nxt;
1471 		kmem_free(this, sizeof (pmcs_dmachunk_t));
1472 	}
1473 
1474 	/*
1475 	 * Free pools
1476 	 */
1477 	if (pwp->iocomp_cb_cache) {
1478 		kmem_cache_destroy(pwp->iocomp_cb_cache);
1479 	}
1480 
1481 	/*
1482 	 * Free all PHYs (at level > 0), then free the cache
1483 	 */
1484 	pmcs_free_all_phys(pwp, pwp->root_phys);
1485 	if (pwp->phy_cache) {
1486 		kmem_cache_destroy(pwp->phy_cache);
1487 	}
1488 
1489 	/*
1490 	 * Free root PHYs
1491 	 */
1492 	if (pwp->root_phys) {
1493 		pmcs_phy_t *phyp = pwp->root_phys;
1494 		for (i = 0; i < pwp->nphy; i++) {
1495 			mutex_destroy(&phyp->phy_lock);
1496 			phyp = phyp->sibling;
1497 		}
1498 		kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t));
1499 		pwp->root_phys = NULL;
1500 		pwp->nphy = 0;
1501 	}
1502 
1503 	/* Free the targets list */
1504 	if (pwp->targets) {
1505 		kmem_free(pwp->targets,
1506 		    sizeof (pmcs_xscsi_t *) * pwp->max_dev);
1507 	}
1508 
1509 	/*
1510 	 * Free work structures
1511 	 */
1512 
1513 	if (pwp->work && pwp->max_cmd) {
1514 		for (i = 0; i < pwp->max_cmd - 1; i++) {
1515 			pmcwork_t *pwrk = &pwp->work[i];
1516 			mutex_destroy(&pwrk->lock);
1517 			cv_destroy(&pwrk->sleep_cv);
1518 		}
1519 		kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd);
1520 		pwp->work = NULL;
1521 		pwp->max_cmd = 0;
1522 	}
1523 
1524 	/*
1525 	 * Do last property and SCSA cleanup
1526 	 */
1527 	if (pwp->tran) {
1528 		scsi_hba_tran_free(pwp->tran);
1529 		pwp->tran = NULL;
1530 	}
1531 	if (pwp->reset_notify_listf) {
1532 		scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf);
1533 		pwp->reset_notify_listf = NULL;
1534 	}
1535 	ddi_prop_remove_all(pwp->dip);
1536 	if (pwp->stuck) {
1537 		return (-1);
1538 	}
1539 
1540 	/* Free register dump area if allocated */
1541 	if (pwp->regdumpp) {
1542 		kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE);
1543 		pwp->regdumpp = NULL;
1544 	}
1545 	if (pwp->iqpt && pwp->iqpt->head) {
1546 		kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE);
1547 		pwp->iqpt->head = pwp->iqpt->curpos = NULL;
1548 	}
1549 	if (pwp->iqpt) {
1550 		kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t));
1551 		pwp->iqpt = NULL;
1552 	}
1553 
1554 	ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip));
1555 	return (0);
1556 }
1557 
1558 /*
1559  * quiesce (9E) entry point
1560  *
1561  * This function is called when the system is single-threaded at high PIL
1562  * with preemption disabled. Therefore, the function must not block/wait/sleep.
1563  *
1564  * Returns DDI_SUCCESS or DDI_FAILURE.
1565  *
1566  */
1567 static int
1568 pmcs_quiesce(dev_info_t *dip)
1569 {
1570 	pmcs_hw_t	*pwp;
1571 	scsi_hba_tran_t	*tran;
1572 
1573 	if ((tran = ddi_get_driver_private(dip)) == NULL)
1574 		return (DDI_SUCCESS);
1575 
1576 	/* No quiesce necessary on a per-iport basis */
1577 	if (scsi_hba_iport_unit_address(dip) != NULL) {
1578 		return (DDI_SUCCESS);
1579 	}
1580 
1581 	if ((pwp = TRAN2PMC(tran)) == NULL)
1582 		return (DDI_SUCCESS);
1583 
1584 	/* Stop MPI & Reset chip (no need to re-initialize) */
1585 	(void) pmcs_stop_mpi(pwp);
1586 	(void) pmcs_soft_reset(pwp, B_TRUE);
1587 
1588 	return (DDI_SUCCESS);
1589 }
1590 
1591 /*
1592  * Called with xp->statlock and PHY lock and scratch acquired.
1593  */
1594 static int
1595 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
1596 {
1597 	ata_identify_t *ati;
1598 	int result, i;
1599 	pmcs_phy_t *pptr;
1600 	uint16_t *a;
1601 	union {
1602 		uint8_t nsa[8];
1603 		uint16_t nsb[4];
1604 	} u;
1605 
1606 	/*
1607 	 * Safe defaults - use only if this target is brand new (i.e. doesn't
1608 	 * already have these settings configured)
1609 	 */
1610 	if (xp->capacity == 0) {
1611 		xp->capacity = (uint64_t)-1;
1612 		xp->ca = 1;
1613 		xp->qdepth = 1;
1614 		xp->pio = 1;
1615 	}
1616 
1617 	pptr = xp->phy;
1618 
1619 	/*
1620 	 * We only try and issue an IDENTIFY for first level
1621 	 * (direct attached) devices. We don't try and
1622 	 * set other quirks here (this will happen later,
1623 	 * if the device is fully configured)
1624 	 */
1625 	if (pptr->level) {
1626 		return (0);
1627 	}
1628 
1629 	mutex_exit(&xp->statlock);
1630 	result = pmcs_sata_identify(pwp, pptr);
1631 	mutex_enter(&xp->statlock);
1632 
1633 	if (result) {
1634 		return (result);
1635 	}
1636 	ati = pwp->scratch;
1637 	a = &ati->word108;
1638 	for (i = 0; i < 4; i++) {
1639 		u.nsb[i] = ddi_swap16(*a++);
1640 	}
1641 
1642 	/*
1643 	 * Check the returned data for being a valid (NAA=5) WWN.
1644 	 * If so, use that and override the SAS address we were
1645 	 * given at Link Up time.
1646 	 */
1647 	if ((u.nsa[0] >> 4) == 5) {
1648 		(void) memcpy(pptr->sas_address, u.nsa, 8);
1649 	}
1650 	pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: %s has SAS ADDRESS " SAS_ADDR_FMT,
1651 	    __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address));
1652 	return (0);
1653 }
1654 
1655 /*
1656  * Called with PHY lock and target statlock held and scratch acquired
1657  */
1658 static boolean_t
1659 pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target)
1660 {
1661 	ASSERT(target != NULL);
1662 	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: target = 0x%p",
1663 	    __func__, (void *) target);
1664 
1665 	switch (target->phy->dtype) {
1666 	case SATA:
1667 		if (pmcs_add_sata_device(pwp, target) != 0) {
1668 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
1669 			    "%s: add_sata_device failed for tgt 0x%p",
1670 			    __func__, (void *) target);
1671 			return (B_FALSE);
1672 		}
1673 		break;
1674 	case SAS:
1675 		target->qdepth = maxqdepth;
1676 		break;
1677 	case EXPANDER:
1678 		target->qdepth = 1;
1679 		break;
1680 	}
1681 
1682 	target->new = 0;
1683 	target->assigned = 1;
1684 	target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
1685 	target->dtype = target->phy->dtype;
1686 
1687 	/*
1688 	 * Set the PHY's config stop time to 0.  This is one of the final
1689 	 * stops along the config path, so we're indicating that we
1690 	 * successfully configured the PHY.
1691 	 */
1692 	target->phy->config_stop = 0;
1693 
1694 	return (B_TRUE);
1695 }
1696 
/*
 * Taskq worker: atomically claims all deferred-work flags that were set
 * via SCHEDULE_WORK() and runs each requested job in turn.  Dispatched
 * from pmcs_watchdog().
 */
void
pmcs_worker(void *arg)
{
	pmcs_hw_t *pwp = arg;
	ulong_t work_flags;

	DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	/* Nothing to do unless the HBA is fully up and running */
	if (pwp->state != STATE_RUNNING) {
		return;
	}

	/* Claim every pending flag and clear them in one atomic swap */
	work_flags = atomic_swap_ulong(&pwp->work_flags, 0);

	if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) {
		pmcs_ack_events(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) {
		mutex_enter(&pwp->lock);
		pmcs_spinup_release(pwp, NULL);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) {
		pmcs_ssp_event_recovery(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) {
		pmcs_dev_state_recovery(pwp, NULL);
	}

	if (work_flags & PMCS_WORK_FLAG_DISCOVER) {
		pmcs_discover(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) {
		/* A nonzero return means more aborts remain; reschedule */
		if (pmcs_abort_handler(pwp)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
	}

	if (work_flags & PMCS_WORK_FLAG_SATA_RUN) {
		pmcs_sata_work(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) {
		pmcs_scsa_wq_run(pwp);
		mutex_enter(&pwp->lock);
		PMCS_CQ_RUN(pwp);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) {
		/* On failure retry the allocation; on success run queues */
		if (pmcs_add_more_chunks(pwp,
		    ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
		} else {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
	}
}
1760 
1761 static int
1762 pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize)
1763 {
1764 	pmcs_dmachunk_t *dc;
1765 	unsigned long dl;
1766 	pmcs_chunk_t	*pchunk = NULL;
1767 
1768 	pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
1769 
1770 	pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP);
1771 	if (pchunk == NULL) {
1772 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
1773 		    "Not enough memory for DMA chunks");
1774 		return (-1);
1775 	}
1776 
1777 	if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle,
1778 	    &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp,
1779 	    &pchunk->dma_addr) == B_FALSE) {
1780 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "Failed to setup DMA for chunks");
1781 		kmem_free(pchunk, sizeof (pmcs_chunk_t));
1782 		return (-1);
1783 	}
1784 
1785 	if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) ||
1786 	    (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) {
1787 		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
1788 		return (-1);
1789 	}
1790 
1791 	bzero(pchunk->addrp, nsize);
1792 	dc = NULL;
1793 	for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) {
1794 		pmcs_dmachunk_t *tmp;
1795 		tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP);
1796 		tmp->nxt = dc;
1797 		dc = tmp;
1798 	}
1799 	mutex_enter(&pwp->dma_lock);
1800 	pmcs_idma_chunks(pwp, dc, pchunk, nsize);
1801 	pwp->nchunks++;
1802 	mutex_exit(&pwp->dma_lock);
1803 	return (0);
1804 }
1805 
1806 
/*
 * Walk the entire work structure table, age the timers of all active
 * commands, and for each command whose timer has now expired: mark it
 * timed out and start device state recovery on its target (or complete
 * it immediately if the device is gone).  Runs from pmcs_watchdog()
 * once per PMCS_WATCH_INTERVAL without pwp->lock held.
 */
static void
pmcs_check_commands(pmcs_hw_t *pwp)
{
	pmcs_cmd_t *sp;
	size_t amt;
	char path[32];
	pmcwork_t *pwrk;
	pmcs_xscsi_t *target;
	pmcs_phy_t *phyp;

	for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) {
		mutex_enter(&pwrk->lock);

		/*
		 * If the command isn't active, we can't be timing it still.
		 * Active means the tag is not free and the state is "on chip".
		 */
		if (!PMCS_COMMAND_ACTIVE(pwrk)) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * No timer active for this command.
		 */
		if (pwrk->timer == 0) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * Knock off bits for the time interval.
		 */
		if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) {
			pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL);
		} else {
			pwrk->timer = 0;
		}
		if (pwrk->timer > 0) {
			/* Still time left; check again next interval */
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * The command has now officially timed out.
		 * Get the path for it. If it doesn't have
		 * a phy pointer any more, it's really dead
		 * and can just be put back on the free list.
		 * There should *not* be any commands associated
		 * with it any more.
		 */
		if (pwrk->phy == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "dead command with gone phy being recycled");
			ASSERT(pwrk->xp == NULL);
			pmcs_pwork(pwp, pwrk);
			continue;
		}
		/* Copy the PHY path while we still hold pwrk->lock */
		amt = sizeof (path);
		amt = min(sizeof (pwrk->phy->path), amt);
		(void) memcpy(path, pwrk->phy->path, amt);

		/*
		 * If this is a non-SCSA command, stop here. Eventually
		 * we might do something with non-SCSA commands here-
		 * but so far their timeout mechanisms are handled in
		 * the WAIT_FOR macro.
		 */
		if (pwrk->xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "%s: non-SCSA cmd tag 0x%x timed out",
			    path, pwrk->htag);
			mutex_exit(&pwrk->lock);
			continue;
		}

		sp = pwrk->arg;
		ASSERT(sp != NULL);

		/*
		 * Mark it as timed out.
		 */
		CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
#ifdef	DEBUG
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d",
		    path, pwrk->htag, pwrk->state, pwrk->onwire);
#else
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: SCSA cmd tag 0x%x timed out (state %x)",
		    path, pwrk->htag, pwrk->state);
#endif
		/*
		 * Mark the work structure as timed out.
		 */
		pwrk->state = PMCS_WORK_STATE_TIMED_OUT;
		phyp = pwrk->phy;
		target = pwrk->xp;
		mutex_exit(&pwrk->lock);

		/* Lock order: PHY lock before target statlock */
		pmcs_lock_phy(phyp);
		mutex_enter(&target->statlock);

		/*
		 * No point attempting recovery if the device is gone
		 *
		 * NOTE(review): pwrk->xp is re-read here after pwrk->lock
		 * was dropped; "target" holds the same pointer captured
		 * under the lock -- presumably equivalent, but verify.
		 */
		if (pwrk->xp->dev_gone) {
			mutex_exit(&target->statlock);
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE "
			    "for htag 0x%08x", __func__,
			    (void *)pwrk->xp, pwrk->htag);
			mutex_enter(&pwrk->lock);
			if (!PMCS_COMMAND_DONE(pwrk)) {
				/* Complete this command here */
				pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: "
				    "Completing cmd (htag 0x%08x) "
				    "anyway", __func__, pwrk->htag);
				pwrk->dead = 1;
				CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
				CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
				pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			} else {
				mutex_exit(&pwrk->lock);
			}
			continue;
		}

		/*
		 * See if we're already waiting for device state recovery
		 */
		if (target->recover_wait) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
			    "%s: Target %p already in recovery", __func__,
			    (void *)target);
			mutex_exit(&target->statlock);
			pmcs_unlock_phy(phyp);
			continue;
		}

		pmcs_start_dev_state_recovery(target, phyp);
		mutex_exit(&target->statlock);
		pmcs_unlock_phy(phyp);
	}
	/*
	 * Run any completions that may have been queued up.
	 */
	PMCS_CQ_RUN(pwp);
}
1958 
1959 static void
1960 pmcs_watchdog(void *arg)
1961 {
1962 	pmcs_hw_t *pwp = arg;
1963 
1964 	DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
1965 	    pwp->config_changed);
1966 
1967 	mutex_enter(&pwp->lock);
1968 
1969 	if (pwp->state != STATE_RUNNING) {
1970 		mutex_exit(&pwp->lock);
1971 		return;
1972 	}
1973 
1974 	if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
1975 		if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
1976 		    DDI_NOSLEEP) != DDI_SUCCESS) {
1977 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
1978 			    "Could not dispatch to worker thread");
1979 		}
1980 	}
1981 	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
1982 	    drv_usectohz(PMCS_WATCH_INTERVAL));
1983 	mutex_exit(&pwp->lock);
1984 	pmcs_check_commands(pwp);
1985 	pmcs_handle_dead_phys(pwp);
1986 }
1987 
1988 static int
1989 pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt)
1990 {
1991 	int i, r, rslt = 0;
1992 	for (i = 0; i < icnt; i++) {
1993 		r = ddi_intr_remove_handler(pwp->ih_table[i]);
1994 		if (r == DDI_SUCCESS) {
1995 			continue;
1996 		}
1997 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
1998 		    "%s: unable to remove interrupt handler %d", __func__, i);
1999 		rslt = -1;
2000 		break;
2001 	}
2002 	return (rslt);
2003 }
2004 
2005 static int
2006 pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt)
2007 {
2008 	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
2009 		int r = ddi_intr_block_disable(&pwp->ih_table[0],
2010 		    pwp->intr_cnt);
2011 		if (r != DDI_SUCCESS) {
2012 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
2013 			    "unable to disable interrupt block");
2014 			return (-1);
2015 		}
2016 	} else {
2017 		int i;
2018 		for (i = 0; i < icnt; i++) {
2019 			if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) {
2020 				continue;
2021 			}
2022 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
2023 			    "unable to disable interrupt %d", i);
2024 			return (-1);
2025 		}
2026 	}
2027 	return (0);
2028 }
2029 
2030 static int
2031 pmcs_free_intrs(pmcs_hw_t *pwp, int icnt)
2032 {
2033 	int i;
2034 	for (i = 0; i < icnt; i++) {
2035 		if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) {
2036 			continue;
2037 		}
2038 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to free interrupt %d", i);
2039 		return (-1);
2040 	}
2041 	kmem_free(pwp->ih_table, pwp->ih_table_size);
2042 	pwp->ih_table_size = 0;
2043 	return (0);
2044 }
2045 
2046 /*
2047  * Try to set up interrupts of type "type" with a minimum number of interrupts
2048  * of "min".
2049  */
2050 static void
2051 pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min)
2052 {
2053 	int rval, avail, count, actual, max;
2054 
2055 	rval = ddi_intr_get_nintrs(pwp->dip, type, &count);
2056 	if ((rval != DDI_SUCCESS) || (count < min)) {
2057 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2058 		    "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d",
2059 		    __func__, type, rval, count, min);
2060 		return;
2061 	}
2062 
2063 	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2064 	    "%s: nintrs = %d for type: %d", __func__, count, type);
2065 
2066 	rval = ddi_intr_get_navail(pwp->dip, type, &avail);
2067 	if ((rval != DDI_SUCCESS) || (avail < min)) {
2068 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2069 		    "%s: get_navail failed; type: %d rc: %d avail: %d min: %d",
2070 		    __func__, type, rval, avail, min);
2071 		return;
2072 	}
2073 
2074 	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2075 	    "%s: navail = %d for type: %d", __func__, avail, type);
2076 
2077 	pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t);
2078 	pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP);
2079 
2080 	switch (type) {
2081 	case DDI_INTR_TYPE_MSIX:
2082 		pwp->int_type = PMCS_INT_MSIX;
2083 		max = PMCS_MAX_MSIX;
2084 		break;
2085 	case DDI_INTR_TYPE_MSI:
2086 		pwp->int_type = PMCS_INT_MSI;
2087 		max = PMCS_MAX_MSI;
2088 		break;
2089 	case DDI_INTR_TYPE_FIXED:
2090 	default:
2091 		pwp->int_type = PMCS_INT_FIXED;
2092 		max = PMCS_MAX_FIXED;
2093 		break;
2094 	}
2095 
2096 	rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual,
2097 	    DDI_INTR_ALLOC_NORMAL);
2098 	if (rval != DDI_SUCCESS) {
2099 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2100 		    "%s: ddi_intr_alloc failed; type: %d rc: %d",
2101 		    __func__, type, rval);
2102 		kmem_free(pwp->ih_table, pwp->ih_table_size);
2103 		pwp->ih_table = NULL;
2104 		pwp->ih_table_size = 0;
2105 		pwp->intr_cnt = 0;
2106 		pwp->int_type = PMCS_INT_NONE;
2107 		return;
2108 	}
2109 
2110 	pwp->intr_cnt = actual;
2111 }
2112 
2113 /*
2114  * Set up interrupts.
2115  * We return one of three values:
2116  *
2117  * 0 - success
2118  * EAGAIN - failure to set up interrupts
2119  * EIO - "" + we're now stuck partly enabled
2120  *
2121  * If EIO is returned, we can't unload the driver.
2122  */
static int
pmcs_setup_intr(pmcs_hw_t *pwp)
{
	int i, r, itypes, oqv_count;
	ddi_intr_handler_t **iv_table;
	size_t iv_table_size;
	uint_t pri;

	/* Ask the DDI which interrupt types this device/platform supports. */
	if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "cannot get interrupt types");
		return (EAGAIN);
	}

	/* Honor the driver tunables that force-disable MSI-X and/or MSI. */
	if (disable_msix) {
		itypes &= ~DDI_INTR_TYPE_MSIX;
	}
	if (disable_msi) {
		itypes &= ~DDI_INTR_TYPE_MSI;
	}

	/*
	 * We won't know what firmware we're running until we call pmcs_setup,
	 * and we can't call pmcs_setup until we establish interrupts.
	 */

	pwp->int_type = PMCS_INT_NONE;

	/*
	 * We want PMCS_MAX_MSIX vectors for MSI-X.  Anything less would be
	 * uncivilized.
	 */
	/*
	 * Try each type in order of preference (MSI-X, MSI, FIXED).
	 * pmcs_setup_intr_impl() sets pwp->int_type on success, in which
	 * case we zero itypes so no further type is attempted.
	 */
	if (itypes & DDI_INTR_TYPE_MSIX) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
		if (pwp->int_type == PMCS_INT_MSIX) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_MSI) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
		if (pwp->int_type == PMCS_INT_MSI) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_FIXED) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
		if (pwp->int_type == PMCS_INT_FIXED) {
			itypes = 0;
		}
	}

	/* If no allocation succeeded, intr_cnt is still zero. */
	if (pwp->intr_cnt == 0) {
		pmcs_prt(pwp, PMCS_PRT_ERR, "No interrupts available");
		return (EAGAIN);
	}

	/* Temporary table mapping each vector to its handler function. */
	iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
	iv_table = kmem_alloc(iv_table_size, KM_SLEEP);

	/*
	 * Get iblock cookie and add handlers.
	 *
	 * The handler layout depends on how many vectors we got:
	 * 1 vector  - one handler demultiplexes everything,
	 * 2 vectors - I/O completions get their own vector,
	 * 4 vectors - one vector per outbound queue plus fatal.
	 */
	switch (pwp->intr_cnt) {
	case 1:
		iv_table[0] = pmcs_all_intr;
		break;
	case 2:
		iv_table[0] = pmcs_iodone_ix;
		iv_table[1] = pmcs_nonio_ix;
		break;
	case 4:
		iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
		iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
		iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
		iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
		kmem_free(iv_table, iv_table_size);
		return (EAGAIN);
	}

	for (i = 0; i < pwp->intr_cnt; i++) {
		r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
		    (caddr_t)pwp, NULL);
		if (r != DDI_SUCCESS) {
			/*
			 * Unwind only the i handlers added so far.  If the
			 * unwind itself fails we are stuck partly enabled
			 * and must return EIO (driver cannot unload).
			 */
			kmem_free(iv_table, iv_table_size);
			if (pmcs_remove_ihandlers(pwp, i)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, i)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	kmem_free(iv_table, iv_table_size);

	if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to get int capabilities");
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/* Enable the vectors: block-enable if supported, else one by one. */
	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, "intr blk enable failed");
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EFAULT);
		}
	} else {
		for (i = 0; i < pwp->intr_cnt; i++) {
			r = ddi_intr_enable(pwp->ih_table[i]);
			if (r == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "unable to enable interrupt %d", i);
			/* Disable the i vectors enabled so far, then unwind. */
			if (pmcs_disable_intrs(pwp, i)) {
				return (EIO);
			}
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	/*
	 * Set up locks.
	 *
	 * All driver mutexes are initialized at the interrupt priority of
	 * vector 0 so they can be taken from interrupt context.
	 */
	if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "unable to get interrupt priority");
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/*
	 * NOTE(review): locks_initted is set before the mutex_init() calls
	 * below actually run; presumably nothing can observe the flag in
	 * that window during attach — confirm.
	 */
	pwp->locks_initted = 1;
	pwp->intr_pri = pri;
	mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));
#ifdef	DEBUG
	mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
#endif
	cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
	for (i = 0; i < PMCS_NIQ; i++) {
		mutex_init(&pwp->iqp_lock[i], NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
	}
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
		    CV_DRIVER, NULL);
	}

	pmcs_prt(pwp, PMCS_PRT_INFO, "%d %s interrup%s configured",
	    pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
	    ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
	    pwp->intr_cnt == 1? "t" : "ts");


	/*
	 * Enable Interrupts
	 *
	 * Build the outbound doorbell mask: start from all-ones and clear
	 * (via XOR) one bit per vector (or at least PMCS_NOQ bits), so the
	 * serviced outbound queues are unmasked.  Note that 'pri' is being
	 * reused here as the mask accumulator, no longer as a priority.
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}
2345 
2346 static int
2347 pmcs_teardown_intr(pmcs_hw_t *pwp)
2348 {
2349 	if (pwp->intr_cnt) {
2350 		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
2351 			return (EIO);
2352 		}
2353 		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2354 			return (EIO);
2355 		}
2356 		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2357 			return (EIO);
2358 		}
2359 		pwp->intr_cnt = 0;
2360 	}
2361 	return (0);
2362 }
2363 
2364 static uint_t
2365 pmcs_general_ix(caddr_t arg1, caddr_t arg2)
2366 {
2367 	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2368 	_NOTE(ARGUNUSED(arg2));
2369 	pmcs_general_intr(pwp);
2370 	return (DDI_INTR_CLAIMED);
2371 }
2372 
2373 static uint_t
2374 pmcs_event_ix(caddr_t arg1, caddr_t arg2)
2375 {
2376 	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2377 	_NOTE(ARGUNUSED(arg2));
2378 	pmcs_event_intr(pwp);
2379 	return (DDI_INTR_CLAIMED);
2380 }
2381 
2382 static uint_t
2383 pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
2384 {
2385 	_NOTE(ARGUNUSED(arg2));
2386 	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2387 
2388 	/*
2389 	 * It's possible that if we just turned interrupt coalescing off
2390 	 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
2391 	 * queue) that there was an interrupt already pending.  We use
2392 	 * io_intr_coal.int_cleared to ensure that we still drop in here and
2393 	 * clear the appropriate interrupt bit one last time.
2394 	 */
2395 	mutex_enter(&pwp->ict_lock);
2396 	if (pwp->io_intr_coal.timer_on ||
2397 	    (pwp->io_intr_coal.int_cleared == B_FALSE)) {
2398 		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2399 		    (1 << PMCS_OQ_IODONE));
2400 		pwp->io_intr_coal.int_cleared = B_TRUE;
2401 	}
2402 	mutex_exit(&pwp->ict_lock);
2403 
2404 	pmcs_iodone_intr(pwp);
2405 
2406 	return (DDI_INTR_CLAIMED);
2407 }
2408 
2409 static uint_t
2410 pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
2411 {
2412 	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2413 	_NOTE(ARGUNUSED(arg2));
2414 	pmcs_fatal_handler(pwp);
2415 	return (DDI_INTR_CLAIMED);
2416 }
2417 
/*
 * Handler for everything other than I/O completions when running with
 * two vectors (see the "case 2" layout in pmcs_setup_intr): fatal
 * interrupts and the general outbound queue.
 */
static uint_t
pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *)arg1;
	uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Service the general queue; events are drained on the same pass.
	 * NOTE(review): the PMCS_OQ_EVENTS doorbell bit is never tested or
	 * cleared here, only PMCS_OQ_GENERAL — presumably the events queue
	 * is routed to the same vector/bit in the 2-vector layout; confirm
	 * against the OQ-to-vector mapping set up at hardware init.
	 */
	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		pmcs_general_intr(pwp);
		pmcs_event_intr(pwp);
	}

	return (DDI_INTR_CLAIMED);
}
2442 
/*
 * Single-vector (INT-X / 1-vector MSI) handler: demultiplex the outbound
 * doorbell register and service every pending source.  Each doorbell bit
 * is cleared *before* its queue is drained so a new event arriving during
 * processing re-raises the interrupt.
 */
static uint_t
pmcs_all_intr(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *) arg1;
	uint32_t obdb;
	int handled = 0;

	obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Check for Outbound Queue service needed
	 */
	if (obdb & (1 << PMCS_OQ_IODONE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		obdb ^= (1 << PMCS_OQ_IODONE);
		handled++;
		pmcs_iodone_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		obdb ^= (1 << PMCS_OQ_GENERAL);
		handled++;
		pmcs_general_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_EVENTS)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_EVENTS));
		obdb ^= (1 << PMCS_OQ_EVENTS);
		handled++;
		pmcs_event_intr(pwp);
	}
	/* Any bits still set are unexpected; log and clear them anyway. */
	if (obdb) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "interrupt bits not handled (0x%x)", obdb);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
		handled++;
	}
	/* MSI delivery is not shared; always claim it. */
	if (pwp->int_type == PMCS_INT_MSI) {
		handled++;
	}
	return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
2496 
/*
 * Handle a fatal firmware interrupt: mark the HBA dead, capture a register
 * dump for post-mortem analysis, mask and clear all outbound doorbell
 * interrupts, and report the loss of service through FMA.  On DEBUG
 * kernels this panics instead, preserving state for a crash dump.
 */
void
pmcs_fatal_handler(pmcs_hw_t *pwp)
{
	pmcs_prt(pwp, PMCS_PRT_ERR, "Fatal Interrupt caught");
	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;
	/* Snapshot chip registers before silencing the device. */
	pmcs_register_dump_int(pwp);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);
	pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);

#ifdef	DEBUG
	cmn_err(CE_PANIC, "PMCS Fatal Firmware Error");
#endif
}
2514 
2515 /*
2516  * Called with PHY lock and target statlock held and scratch acquired.
2517  */
boolean_t
pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt)
{
	pmcs_phy_t *pptr = tgt->phy;

	/* Only SAS/expander/SATA PHYs can back a target. */
	switch (pptr->dtype) {
	case SAS:
	case EXPANDER:
		break;
	case SATA:
		/* SATA devices need command ordering assistance. */
		tgt->ca = 1;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: Target %p has PHY %p with invalid dtype",
		    __func__, (void *)tgt, (void *)pptr);
		return (B_FALSE);
	}

	/* Reset target state for (re)configuration. */
	tgt->new = 1;
	tgt->dev_gone = 0;
	tgt->recover_wait = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
	    "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__,
	    pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));

	if (pmcs_add_new_device(pwp, tgt) != B_TRUE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__,
		    tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));
		/*
		 * NOTE(review): the block comment above this function says
		 * the caller holds tgt->statlock, yet it is destroyed here
		 * on the failure path — destroying a held mutex is invalid;
		 * verify the caller's locking contract on this path.
		 */
		mutex_destroy(&tgt->statlock);
		mutex_destroy(&tgt->wqlock);
		mutex_destroy(&tgt->aqlock);
		return (B_FALSE);
	}

	return (B_TRUE);
}
2557 
2558 /*
2559  * Called with softstate lock held
2560  */
2561 void
2562 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
2563 {
2564 	pmcs_xscsi_t *xp;
2565 	unsigned int vtgt;
2566 
2567 	ASSERT(mutex_owned(&pwp->lock));
2568 
2569 	for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) {
2570 		xp = pwp->targets[vtgt];
2571 		if (xp == NULL) {
2572 			continue;
2573 		}
2574 
2575 		mutex_enter(&xp->statlock);
2576 		if (xp->phy == pptr) {
2577 			if (xp->new) {
2578 				xp->new = 0;
2579 				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2580 				    "cancel config of vtgt %u", vtgt);
2581 			} else {
2582 				pmcs_clear_xp(pwp, xp);
2583 				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2584 				    "Removed tgt 0x%p vtgt %u",
2585 				    (void *)xp, vtgt);
2586 			}
2587 			mutex_exit(&xp->statlock);
2588 			break;
2589 		}
2590 		mutex_exit(&xp->statlock);
2591 	}
2592 }
2593 
2594 void
2595 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, const char *fmt, ...)
2596 {
2597 	va_list	ap;
2598 	int written = 0;
2599 	char *ptr;
2600 	uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1;
2601 	boolean_t system_log;
2602 	int system_log_level;
2603 
2604 	switch (level) {
2605 	case PMCS_PRT_DEBUG_DEVEL:
2606 	case PMCS_PRT_DEBUG_DEV_STATE:
2607 	case PMCS_PRT_DEBUG_PHY_LOCKING:
2608 	case PMCS_PRT_DEBUG_SCSI_STATUS:
2609 	case PMCS_PRT_DEBUG_UNDERFLOW:
2610 	case PMCS_PRT_DEBUG_CONFIG:
2611 	case PMCS_PRT_DEBUG_IPORT:
2612 	case PMCS_PRT_DEBUG_MAP:
2613 	case PMCS_PRT_DEBUG3:
2614 	case PMCS_PRT_DEBUG2:
2615 	case PMCS_PRT_DEBUG1:
2616 	case PMCS_PRT_DEBUG:
2617 		system_log = B_FALSE;
2618 		break;
2619 	case PMCS_PRT_INFO:
2620 		system_log = B_TRUE;
2621 		system_log_level = CE_CONT;
2622 		break;
2623 	case PMCS_PRT_WARN:
2624 		system_log = B_TRUE;
2625 		system_log_level = CE_NOTE;
2626 		break;
2627 	case PMCS_PRT_ERR:
2628 		system_log = B_TRUE;
2629 		system_log_level = CE_WARN;
2630 		break;
2631 	default:
2632 		return;
2633 	}
2634 
2635 	mutex_enter(&pmcs_trace_lock);
2636 	gethrestime(&pmcs_tbuf_ptr->timestamp);
2637 	ptr = pmcs_tbuf_ptr->buf;
2638 	written += snprintf(ptr, elem_size, "pmcs%d:%d: ",
2639 	    ddi_get_instance(pwp->dip), level);
2640 	ptr += strlen(ptr);
2641 	va_start(ap, fmt);
2642 	written += vsnprintf(ptr, elem_size - written, fmt, ap);
2643 	va_end(ap);
2644 	if (written > elem_size - 1) {
2645 		/* Indicate truncation */
2646 		pmcs_tbuf_ptr->buf[elem_size - 1] = '+';
2647 	}
2648 	if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) {
2649 		pmcs_tbuf_ptr = pmcs_tbuf;
2650 		pmcs_tbuf_wrap = B_TRUE;
2651 		pmcs_tbuf_idx = 0;
2652 	} else {
2653 		++pmcs_tbuf_ptr;
2654 	}
2655 	mutex_exit(&pmcs_trace_lock);
2656 
2657 	/*
2658 	 * When pmcs_force_syslog in non-zero, everything goes also
2659 	 * to syslog, at CE_CONT level.
2660 	 */
2661 	if (pmcs_force_syslog) {
2662 		system_log = B_TRUE;
2663 		system_log_level = CE_CONT;
2664 	}
2665 
2666 	/*
2667 	 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also
2668 	 * goes to syslog.
2669 	 */
2670 	if (system_log) {
2671 		char local[196];
2672 
2673 		switch (system_log_level) {
2674 		case CE_CONT:
2675 			(void) snprintf(local, sizeof (local), "%sINFO: ",
2676 			    pmcs_console ? "" : "?");
2677 			break;
2678 		case CE_NOTE:
2679 		case CE_WARN:
2680 			local[0] = 0;
2681 			break;
2682 		default:
2683 			return;
2684 		}
2685 
2686 		ptr = local;
2687 		ptr += strlen(local);
2688 		(void) snprintf(ptr, (sizeof (local)) -
2689 		    ((size_t)ptr - (size_t)local), "pmcs%d: ",
2690 		    ddi_get_instance(pwp->dip));
2691 		ptr += strlen(ptr);
2692 		va_start(ap, fmt);
2693 		(void) vsnprintf(ptr,
2694 		    (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap);
2695 		va_end(ap);
2696 		if (level == CE_CONT) {
2697 			(void) strlcat(local, "\n", sizeof (local));
2698 		}
2699 		cmn_err(system_log_level, local);
2700 	}
2701 
2702 }
2703 
2704 /*
2705  * pmcs_acquire_scratch
2706  *
2707  * If "wait" is true, the caller will wait until it can acquire the scratch.
2708  * This implies the caller needs to be in a context where spinning for an
2709  * indeterminate amount of time is acceptable.
2710  */
2711 int
2712 pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
2713 {
2714 	int rval;
2715 
2716 	if (!wait) {
2717 		return (atomic_swap_8(&pwp->scratch_locked, 1));
2718 	}
2719 
2720 	/*
2721 	 * Caller will wait for scratch.
2722 	 */
2723 	while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
2724 		drv_usecwait(100);
2725 	}
2726 
2727 	return (rval);
2728 }
2729 
/* Release the scratch area acquired with pmcs_acquire_scratch(). */
void
pmcs_release_scratch(pmcs_hw_t *pwp)
{
	pwp->scratch_locked = 0;
}
2735 
/*
 * Create and install a named kstat ("SecondsSinceLastReset", frame/word
 * counters, link error counters) for every PHY on the iport that does not
 * already have one.  Failure to create a kstat for one PHY is logged and
 * does not prevent the others from being created.
 */
static void
pmcs_create_phy_stats(pmcs_iport_t *iport)
{
	sas_phy_stats_t		*ps;
	pmcs_hw_t		*pwp;
	pmcs_phy_t		*phyp;
	int			ndata;
	char			ks_name[KSTAT_STRLEN];

	ASSERT(iport != NULL);
	pwp = iport->pwp;
	ASSERT(pwp != NULL);

	mutex_enter(&iport->lock);

	for (phyp = list_head(&iport->phys);
	    phyp != NULL;
	    phyp = list_next(&iport->phys, phyp)) {

		pmcs_lock_phy(phyp);

		if (phyp->phy_stats != NULL) {
			pmcs_unlock_phy(phyp);
			/* We've already created this kstat instance */
			continue;
		}

		/* One kstat_named_t slot per sas_phy_stats_t member. */
		ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));

		/* Name: <driver>.<hba wwn>.<iport instance>.<phynum> */
		(void) snprintf(ks_name, sizeof (ks_name),
		    "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
		    (longlong_t)pwp->sas_wwns[0],
		    ddi_get_instance(iport->dip), phyp->phynum);

		phyp->phy_stats = kstat_create("pmcs",
		    ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
		    KSTAT_TYPE_NAMED, ndata, 0);

		if (phyp->phy_stats == NULL) {
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "%s: Failed to create %s kstats", __func__,
			    ks_name);
			/* Non-fatal; try the next PHY. */
			continue;
		}

		ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;

		kstat_named_init(&ps->seconds_since_last_reset,
		    "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_frames,
		    "TxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_frames,
		    "RxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_words,
		    "TxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_words,
		    "RxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->invalid_dword_count,
		    "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->running_disparity_error_count,
		    "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->loss_of_dword_sync_count,
		    "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->phy_reset_problem_count,
		    "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);

		/* Counters are refreshed on demand by pmcs_update_phy_stats */
		phyp->phy_stats->ks_private = phyp;
		phyp->phy_stats->ks_update = pmcs_update_phy_stats;
		kstat_install(phyp->phy_stats);
		pmcs_unlock_phy(phyp);
	}

	mutex_exit(&iport->lock);
}
2811 
2812 int
2813 pmcs_update_phy_stats(kstat_t *ks, int rw)
2814 {
2815 	int		val, ret = DDI_FAILURE;
2816 	pmcs_phy_t	*pptr = (pmcs_phy_t *)ks->ks_private;
2817 	pmcs_hw_t	*pwp = pptr->pwp;
2818 	sas_phy_stats_t	*ps = ks->ks_data;
2819 
2820 	_NOTE(ARGUNUSED(rw));
2821 	ASSERT((pptr != NULL) && (pwp != NULL));
2822 
2823 	/*
2824 	 * We just want to lock against other invocations of kstat;
2825 	 * we don't need to pmcs_lock_phy() for this.
2826 	 */
2827 	mutex_enter(&pptr->phy_lock);
2828 
2829 	/* Get Stats from Chip */
2830 	val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
2831 	if (val == DDI_FAILURE)
2832 		goto fail;
2833 	ps->invalid_dword_count.value.ull = (unsigned long long)val;
2834 
2835 	val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
2836 	if (val == DDI_FAILURE)
2837 		goto fail;
2838 	ps->running_disparity_error_count.value.ull = (unsigned long long)val;
2839 
2840 	val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
2841 	if (val == DDI_FAILURE)
2842 		goto fail;
2843 	ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;
2844 
2845 	val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
2846 	if (val == DDI_FAILURE)
2847 		goto fail;
2848 	ps->phy_reset_problem_count.value.ull = (unsigned long long)val;
2849 
2850 	ret = DDI_SUCCESS;
2851 fail:
2852 	mutex_exit(&pptr->phy_lock);
2853 	return (ret);
2854 }
2855 
2856 static void
2857 pmcs_destroy_phy_stats(pmcs_iport_t *iport)
2858 {
2859 	pmcs_phy_t		*phyp;
2860 
2861 	ASSERT(iport != NULL);
2862 	mutex_enter(&iport->lock);
2863 	phyp = iport->pptr;
2864 	if (phyp == NULL) {
2865 		mutex_exit(&iport->lock);
2866 		return;
2867 	}
2868 
2869 	pmcs_lock_phy(phyp);
2870 	if (phyp->phy_stats != NULL) {
2871 		kstat_delete(phyp->phy_stats);
2872 		phyp->phy_stats = NULL;
2873 	}
2874 	pmcs_unlock_phy(phyp);
2875 
2876 	mutex_exit(&iport->lock);
2877 }
2878 
/*ARGSUSED*/
/*
 * FMA error callback: post a PCI ereport for the fault and report the
 * already-determined fault status back to the framework.
 */
static int
pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
2890 
2891 static void
2892 pmcs_fm_init(pmcs_hw_t *pwp)
2893 {
2894 	ddi_iblock_cookie_t	fm_ibc;
2895 
2896 	/* Only register with IO Fault Services if we have some capability */
2897 	if (pwp->fm_capabilities) {
2898 		/* Adjust access and dma attributes for FMA */
2899 		pwp->reg_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
2900 		pwp->dev_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
2901 		pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2902 		pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2903 		pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2904 		pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2905 
2906 		/*
2907 		 * Register capabilities with IO Fault Services.
2908 		 */
2909 		ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);
2910 
2911 		/*
2912 		 * Initialize pci ereport capabilities if ereport
2913 		 * capable (should always be.)
2914 		 */
2915 		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
2916 		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2917 			pci_ereport_setup(pwp->dip);
2918 		}
2919 
2920 		/*
2921 		 * Register error callback if error callback capable.
2922 		 */
2923 		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2924 			ddi_fm_handler_register(pwp->dip,
2925 			    pmcs_fm_error_cb, (void *) pwp);
2926 		}
2927 	}
2928 }
2929 
2930 static void
2931 pmcs_fm_fini(pmcs_hw_t *pwp)
2932 {
2933 	/* Only unregister FMA capabilities if registered */
2934 	if (pwp->fm_capabilities) {
2935 		/*
2936 		 * Un-register error callback if error callback capable.
2937 		 */
2938 		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2939 			ddi_fm_handler_unregister(pwp->dip);
2940 		}
2941 
2942 		/*
2943 		 * Release any resources allocated by pci_ereport_setup()
2944 		 */
2945 		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
2946 		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
2947 			pci_ereport_teardown(pwp->dip);
2948 		}
2949 
2950 		/* Unregister from IO Fault Services */
2951 		ddi_fm_fini(pwp->dip);
2952 
2953 		/* Adjust access and dma attributes for FMA */
2954 		pwp->reg_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
2955 		pwp->dev_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
2956 		pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
2957 		pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
2958 		pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
2959 		pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
2960 	}
2961 }
2962 
2963 static boolean_t
2964 pmcs_fabricate_wwid(pmcs_hw_t *pwp)
2965 {
2966 	char *cp, c;
2967 	uint64_t adr;
2968 	int i;
2969 
2970 	cp = &c;
2971 	(void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);
2972 	if (adr == 0) {
2973 		static const char foo[] = __DATE__ __TIME__;
2974 		/* Oh, dear, we're toast */
2975 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2976 		    "%s: No serial number available to fabricate WWN",
2977 		    __func__);
2978 		for (i = 0; foo[i]; i++) {
2979 			adr += foo[i];
2980 		}
2981 	}
2982 	adr <<= 8;
2983 	adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
2984 	adr |= (5ULL << 60);
2985 	for (i = 0; i < PMCS_MAX_PORTS; i++) {
2986 		pwp->sas_wwns[i] = adr + i;
2987 	}
2988 
2989 	return (B_TRUE);
2990 }
2991