1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  *
28  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
29  * based chipsets.
30  *
31  * NCQ
32  * ---
33  *
 * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
 * disabled and is likely to be revisited in the future.
36  *
37  *
38  * Power Management
39  * ----------------
40  *
 * Normally power management would be responsible for ensuring the device
 * is quiescent and then changing the device's power state, such as
 * powering down part or all of the device.  mcp5x/ck804 is unique in
 * that it is only available as part of a larger southbridge chipset, so
 * removing power from the device isn't possible.  Switches to control
 * power management states D0/D3 in the PCI configuration space appear to
 * be supported, but changes to these states are apparently ignored.
 * The only further PM that the driver _could_ do is shut down the PHY,
 * but in order to deliver the first rev of the driver sooner rather than
 * later, that will be deferred until some future phase.
 *
 * Since the driver currently will not directly change any power state of
 * the device, no power() entry point is required.  However, it is
 * possible that in ACPI power state S3, aka suspend to RAM, power
 * may be removed from the device, and the driver cannot rely on the BIOS
 * to have reset any state.  For the time being, there are no known
 * non-default configurations that need to be programmed.  This judgment
 * is based on the port of the legacy ata driver not having any such
 * functionality and on conversations with the PM team.  If such
 * restoration is later deemed necessary, it can be incorporated into the
 * DDI_RESUME processing.
62  *
63  */
64 
65 #include <sys/scsi/scsi.h>
66 #include <sys/pci.h>
67 #include <sys/byteorder.h>
68 #include <sys/sunddi.h>
69 #include <sys/sata/sata_hba.h>
70 #ifdef SGPIO_SUPPORT
71 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72 #include <sys/devctl.h>
73 #include <sys/sdt.h>
74 #endif
75 #include <sys/sata/adapters/nv_sata/nv_sata.h>
76 #include <sys/disp.h>
77 #include <sys/note.h>
78 #include <sys/promif.h>
79 
80 
81 /*
82  * Function prototypes for driver entry points
83  */
84 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86 static int nv_quiesce(dev_info_t *dip);
87 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
88     void *arg, void **result);
89 
90 /*
91  * Function prototypes for entry points from sata service module
92  * These functions are distinguished from other local functions
93  * by the prefix "nv_sata_"
94  */
95 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
96 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
97 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
98 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
100 
101 /*
102  * Local function prototypes
103  */
104 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
105 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
106 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
107 #ifdef NV_MSI_SUPPORTED
108 static int nv_add_msi_intrs(nv_ctl_t *nvc);
109 #endif
110 static void nv_rem_intrs(nv_ctl_t *nvc);
111 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
112 static int nv_start_nodata(nv_port_t *nvp, int slot);
113 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
114 static int nv_start_pio_in(nv_port_t *nvp, int slot);
115 static int nv_start_pio_out(nv_port_t *nvp, int slot);
116 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
117 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
118 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
119 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
120 static int nv_start_dma(nv_port_t *nvp, int slot);
121 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
122 static void nv_uninit_ctl(nv_ctl_t *nvc);
123 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 static void nv_uninit_port(nv_port_t *nvp);
126 static void nv_init_port(nv_port_t *nvp);
127 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129 #ifdef NCQ
130 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #endif
132 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134     int state);
135 static void nv_common_reg_init(nv_ctl_t *nvc);
136 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
137 static void nv_reset(nv_port_t *nvp, char *reason);
138 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
139 static void nv_timeout(void *);
140 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
141 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
142 static void nv_read_signature(nv_port_t *nvp);
143 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
144 static void ck804_set_intr(nv_port_t *nvp, int flag);
145 static void nv_resume(nv_port_t *nvp);
146 static void nv_suspend(nv_port_t *nvp);
147 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
148 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
149     boolean_t reset);
150 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151     sata_pkt_t *spkt);
152 static void nv_link_event(nv_port_t *nvp, int flags);
153 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155     uchar_t failure_onbits2, uchar_t failure_offbits2,
156     uchar_t failure_onbits3, uchar_t failure_offbits3,
157     uint_t timeout_usec, int type_wait);
158 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159     uint_t timeout_usec, int type_wait);
160 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161 static void nv_setup_timeout(nv_port_t *nvp, clock_t microseconds);
162 static clock_t nv_monitor_reset(nv_port_t *nvp);
163 static int nv_bm_status_clear(nv_port_t *nvp);
164 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
165 
166 #ifdef SGPIO_SUPPORT
167 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
168 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
169 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
170     cred_t *credp, int *rvalp);
171 
172 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
173 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
174     uint32_t *cbpp);
175 static int nv_sgp_init(nv_ctl_t *nvc);
176 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
177 static int nv_sgp_csr_read(nv_ctl_t *nvc);
178 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
179 static int nv_sgp_write_data(nv_ctl_t *nvc);
180 static void nv_sgp_activity_led_ctl(void *arg);
181 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
182 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
183 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
185 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
186 static void nv_sgp_cleanup(nv_ctl_t *nvc);
187 #endif
188 
189 
190 /*
 * DMA attributes for the data buffer on x86.  dma_attr_burstsizes is unused.
 * Verify whether it is needed if this driver is ported to another ISA.
193  */
194 static ddi_dma_attr_t buffer_dma_attr = {
195 	DMA_ATTR_V0,		/* dma_attr_version */
196 	0,			/* dma_attr_addr_lo: lowest bus address */
197 	0xffffffffull,		/* dma_attr_addr_hi: */
198 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
199 	4,			/* dma_attr_align */
200 	1,			/* dma_attr_burstsizes. */
201 	1,			/* dma_attr_minxfer */
202 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
203 	0xffffffffull,		/* dma_attr_seg */
204 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
205 	512,			/* dma_attr_granular */
206 	0,			/* dma_attr_flags */
207 };
208 static ddi_dma_attr_t buffer_dma_40bit_attr = {
209 	DMA_ATTR_V0,		/* dma_attr_version */
210 	0,			/* dma_attr_addr_lo: lowest bus address */
211 	0xffffffffffull,	/* dma_attr_addr_hi: */
212 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
213 	4,			/* dma_attr_align */
214 	1,			/* dma_attr_burstsizes. */
215 	1,			/* dma_attr_minxfer */
216 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
217 	0xffffffffull,		/* dma_attr_seg */
218 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
219 	512,			/* dma_attr_granular */
220 	0,			/* dma_attr_flags */
221 };
222 
223 
224 /*
225  * DMA attributes for PRD tables
226  */
227 ddi_dma_attr_t nv_prd_dma_attr = {
228 	DMA_ATTR_V0,		/* dma_attr_version */
229 	0,			/* dma_attr_addr_lo */
230 	0xffffffffull,		/* dma_attr_addr_hi */
231 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
232 	4,			/* dma_attr_align */
233 	1,			/* dma_attr_burstsizes */
234 	1,			/* dma_attr_minxfer */
235 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
236 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
237 	1,			/* dma_attr_sgllen */
238 	1,			/* dma_attr_granular */
239 	0			/* dma_attr_flags */
240 };
241 
242 /*
243  * Device access attributes
244  */
245 static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
249 };
250 
251 
252 #ifdef SGPIO_SUPPORT
253 static struct cb_ops nv_cb_ops = {
254 	nv_open,		/* open */
255 	nv_close,		/* close */
256 	nodev,			/* strategy (block) */
257 	nodev,			/* print (block) */
258 	nodev,			/* dump (block) */
259 	nodev,			/* read */
260 	nodev,			/* write */
261 	nv_ioctl,		/* ioctl */
262 	nodev,			/* devmap */
263 	nodev,			/* mmap */
264 	nodev,			/* segmap */
265 	nochpoll,		/* chpoll */
266 	ddi_prop_op,		/* prop_op */
267 	NULL,			/* streams */
268 	D_NEW | D_MP |
269 	D_64BIT | D_HOTPLUG,	/* flags */
270 	CB_REV			/* rev */
271 };
272 #endif  /* SGPIO_SUPPORT */
273 
274 
275 static struct dev_ops nv_dev_ops = {
276 	DEVO_REV,		/* devo_rev */
277 	0,			/* refcnt  */
278 	nv_getinfo,		/* info */
279 	nulldev,		/* identify */
280 	nulldev,		/* probe */
281 	nv_attach,		/* attach */
282 	nv_detach,		/* detach */
283 	nodev,			/* no reset */
284 #ifdef SGPIO_SUPPORT
285 	&nv_cb_ops,		/* driver operations */
286 #else
287 	(struct cb_ops *)0,	/* driver operations */
288 #endif
289 	NULL,			/* bus operations */
290 	NULL,			/* power */
291 	nv_quiesce		/* quiesce */
292 };
293 
294 
295 /*
296  * Request Sense CDB for ATAPI
297  */
298 static const uint8_t nv_rqsense_cdb[16] = {
299 	SCMD_REQUEST_SENSE,
300 	0,
301 	0,
302 	0,
303 	SATA_ATAPI_MIN_RQSENSE_LEN,
304 	0,
305 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
306 };
307 
308 
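/*
 * Forward declaration; the initializer for nv_hotplug_ops appears below.
 */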
309 static sata_tran_hotplug_ops_t nv_hotplug_ops;
310 
311 extern struct mod_ops mod_driverops;
312 
313 static  struct modldrv modldrv = {
314 	&mod_driverops,	/* driverops */
315 	"Nvidia ck804/mcp51/mcp55 HBA",
316 	&nv_dev_ops,	/* driver ops */
317 };
318 
319 static  struct modlinkage modlinkage = {
320 	MODREV_1,
321 	&modldrv,
322 	NULL
323 };
324 
325 /*
 * Maximum number of consecutive interrupts processed in the loop during a
 * single invocation of the port interrupt routine.
328  */
329 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
330 
331 /*
332  * wait between checks of reg status
333  */
334 int nv_usec_delay = NV_WAIT_REG_CHECK;
335 
336 /*
 * The following are used for nv_vcmn_err() and nv_log()
338  */
339 
340 /*
 * temporary buffer to avoid wasting limited stack space
342  */
343 static char nv_log_buf[NV_LOGBUF_LEN];
344 
345 /*
346  * protects nv_log_buf
347  */
348 static kmutex_t nv_log_mutex;
349 
350 /*
 * These on-by-default flags were chosen so that the driver
 * logs as much unusual run-time information as possible
 * without overflowing the ring buffer with useless information or
 * causing any significant performance penalty.
355  */
356 int nv_debug_flags =
357     NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
358 
359 /*
360  * normally debug information is not logged to the console
361  * but this allows it to be enabled.
362  */
363 int nv_log_to_console = B_FALSE;
364 
365 /*
366  * normally debug information is not logged to cmn_err but
367  * in some cases it may be desired.
368  */
369 int nv_log_to_cmn_err = B_FALSE;
370 
371 /*
 * Using prom_printf() avoids cmn_err/syslog and goes right
 * to the console, which may be desirable in some situations, but
 * it may be synchronous, which would change timings and
 * impact performance.  Use with caution.
376  */
377 int nv_prom_print = B_FALSE;
378 
379 /*
380  * Opaque state pointer to be initialized by ddi_soft_state_init()
381  */
382 static void *nv_statep	= NULL;
383 
384 /*
385  * Map from CBP to shared space
386  *
 * When an MCP55/IO55 part supports SGPIO, there is a single CBP (SGPIO
388  * Control Block Pointer as well as the corresponding Control Block) that
389  * is shared across all driver instances associated with that part.  The
390  * Control Block is used to update and query the LED state for the devices
391  * on the controllers associated with those instances.  There is also some
392  * driver state (called the 'common' area here) associated with each SGPIO
 * Control Block.  The nv_sgp_cbp2cmn array is used to map a given CBP to
 * its common area.
395  *
396  * The driver can also use this mapping array to determine whether the
397  * common area for a given CBP has been initialized, and, if it isn't
398  * initialized, initialize it.
399  *
400  * When a driver instance with a CBP value that is already in the array is
401  * initialized, it will use the pointer to the previously initialized common
402  * area associated with that SGPIO CBP value, rather than initialize it
403  * itself.
404  *
405  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
406  */
407 #ifdef SGPIO_SUPPORT
408 static kmutex_t nv_sgp_c2c_mutex;
409 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
410 #endif
411 
412 /*
 * controls whether 40-bit DMA is used or not
414  */
415 int nv_sata_40bit_dma = B_TRUE;
416 
417 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
418 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
419 	nv_sata_activate,	/* activate port. cfgadm -c connect */
420 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
421 };
422 
423 
424 /*
425  *  nv module initialization
426  */
427 int
428 _init(void)
429 {
430 	int	error;
431 #ifdef SGPIO_SUPPORT
432 	int	i;
433 #endif
434 
435 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
436 
437 	if (error != 0) {
438 
439 		return (error);
440 	}
441 
442 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
443 #ifdef SGPIO_SUPPORT
444 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
445 
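	/* mark all CBP-to-common map entries as unused */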
446 	for (i = 0; i < NV_MAX_CBPS; i++) {
447 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
448 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
449 	}
450 #endif
451 
452 	if ((error = sata_hba_init(&modlinkage)) != 0) {
453 		ddi_soft_state_fini(&nv_statep);
454 		mutex_destroy(&nv_log_mutex);
455 
456 		return (error);
457 	}
458 
459 	error = mod_install(&modlinkage);
460 	if (error != 0) {
461 		sata_hba_fini(&modlinkage);
462 		ddi_soft_state_fini(&nv_statep);
463 		mutex_destroy(&nv_log_mutex);
464 
465 		return (error);
466 	}
467 
468 	return (error);
469 }
470 
471 
472 /*
473  * nv module uninitialize
474  */
475 int
476 _fini(void)
477 {
478 	int	error;
479 
480 	error = mod_remove(&modlinkage);
481 
482 	if (error != 0) {
483 		return (error);
484 	}
485 
486 	/*
487 	 * remove the resources allocated in _init()
488 	 */
489 	mutex_destroy(&nv_log_mutex);
490 #ifdef SGPIO_SUPPORT
491 	mutex_destroy(&nv_sgp_c2c_mutex);
492 #endif
493 	sata_hba_fini(&modlinkage);
494 	ddi_soft_state_fini(&nv_statep);
495 
496 	return (error);
497 }
498 
499 
500 /*
501  * nv _info entry point
502  */
503 int
504 _info(struct modinfo *modinfop)
505 {
506 	return (mod_info(&modlinkage, modinfop));
507 }
508 
509 
510 /*
 * These wrappers for ddi_{get,put}{8,16,32} are for observability
 * with DTrace.
513  */
514 #ifdef DEBUG
515 
516 static void
517 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
518 {
519 	ddi_put8(handle, dev_addr, value);
520 }
521 
522 static void
523 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
524 {
525 	ddi_put32(handle, dev_addr, value);
526 }
527 
528 static uint32_t
529 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
530 {
531 	return (ddi_get32(handle, dev_addr));
532 }
533 
534 static void
535 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
536 {
537 	ddi_put16(handle, dev_addr, value);
538 }
539 
540 static uint16_t
541 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
542 {
543 	return (ddi_get16(handle, dev_addr));
544 }
545 
546 static uint8_t
547 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
548 {
549 	return (ddi_get8(handle, dev_addr));
550 }
551 
552 #else
553 
554 #define	nv_put8 ddi_put8
555 #define	nv_put32 ddi_put32
556 #define	nv_get32 ddi_get32
557 #define	nv_put16 ddi_put16
558 #define	nv_get16 ddi_get16
559 #define	nv_get8 ddi_get8
560 
561 #endif
562 
563 
564 /*
565  * Driver attach
566  */
567 static int
568 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
569 {
570 	int status, attach_state, intr_types, bar, i, j, command;
571 	int inst = ddi_get_instance(dip);
572 	ddi_acc_handle_t pci_conf_handle;
573 	nv_ctl_t *nvc;
574 	uint8_t subclass;
575 	uint32_t reg32;
576 #ifdef SGPIO_SUPPORT
577 	pci_regspec_t *regs;
578 	int rlen;
579 #endif
580 
581 	switch (cmd) {
582 
583 	case DDI_ATTACH:
584 
585 		attach_state = ATTACH_PROGRESS_NONE;
586 
587 		status = ddi_soft_state_zalloc(nv_statep, inst);
588 
589 		if (status != DDI_SUCCESS) {
590 			break;
591 		}
592 
593 		nvc = ddi_get_soft_state(nv_statep, inst);
594 
595 		nvc->nvc_dip = dip;
596 
597 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
598 
599 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
600 
601 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
602 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
603 			    PCI_CONF_REVID);
604 			NVLOG(NVDBG_INIT, nvc, NULL,
605 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
606 			    inst, nvc->nvc_revid, nv_debug_flags);
607 		} else {
608 			break;
609 		}
610 
611 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
612 
613 		/*
614 		 * Set the PCI command register: enable IO/MEM/Master.
615 		 */
616 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
617 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
618 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
619 
620 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
621 
622 		if (subclass & PCI_MASS_RAID) {
623 			cmn_err(CE_WARN,
624 			    "attach failed: RAID mode not supported");
625 
626 			break;
627 		}
628 
629 		/*
630 		 * the 6 bars of the controller are:
631 		 * 0: port 0 task file
632 		 * 1: port 0 status
633 		 * 2: port 1 task file
634 		 * 3: port 1 status
635 		 * 4: bus master for both ports
636 		 * 5: extended registers for SATA features
637 		 */
638 		for (bar = 0; bar < 6; bar++) {
639 			status = ddi_regs_map_setup(dip, bar + 1,
640 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
641 			    &nvc->nvc_bar_hdl[bar]);
642 
643 			if (status != DDI_SUCCESS) {
644 				NVLOG(NVDBG_INIT, nvc, NULL,
645 				    "ddi_regs_map_setup failure for bar"
646 				    " %d status = %d", bar, status);
647 				break;
648 			}
649 		}
650 
651 		attach_state |= ATTACH_PROGRESS_BARS;
652 
653 		/*
654 		 * initialize controller structures
655 		 */
656 		status = nv_init_ctl(nvc, pci_conf_handle);
657 
658 		if (status == NV_FAILURE) {
659 			NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
660 			    NULL);
661 
662 			break;
663 		}
664 
665 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
666 
667 		/*
668 		 * initialize mutexes
669 		 */
670 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
671 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
672 
673 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
674 
675 		/*
676 		 * get supported interrupt types
677 		 */
678 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
679 		    DDI_SUCCESS) {
680 			nv_cmn_err(CE_WARN, nvc, NULL,
681 			    "ddi_intr_get_supported_types failed");
682 
683 			break;
684 		}
685 
686 		NVLOG(NVDBG_INIT, nvc, NULL,
687 		    "ddi_intr_get_supported_types() returned: 0x%x",
688 		    intr_types);
689 
690 #ifdef NV_MSI_SUPPORTED
691 		if (intr_types & DDI_INTR_TYPE_MSI) {
692 			NVLOG(NVDBG_INIT, nvc, NULL,
693 			    "using MSI interrupt type", NULL);
694 
695 			/*
696 			 * Try MSI first, but fall back to legacy if MSI
697 			 * attach fails
698 			 */
699 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
700 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
701 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
702 				NVLOG(NVDBG_INIT, nvc, NULL,
703 				    "MSI interrupt setup done", NULL);
704 			} else {
705 				nv_cmn_err(CE_CONT, nvc, NULL,
706 				    "MSI registration failed "
707 				    "will try Legacy interrupts");
708 			}
709 		}
710 #endif
711 
712 		/*
713 		 * Either the MSI interrupt setup has failed or only
714 		 * the fixed interrupts are available on the system.
715 		 */
716 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
717 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
718 
719 			NVLOG(NVDBG_INIT, nvc, NULL,
720 			    "using Legacy interrupt type", NULL);
721 
722 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
723 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
724 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
725 				NVLOG(NVDBG_INIT, nvc, NULL,
726 				    "Legacy interrupt setup done", NULL);
727 			} else {
728 				nv_cmn_err(CE_WARN, nvc, NULL,
729 				    "legacy interrupt setup failed");
730 				NVLOG(NVDBG_INIT, nvc, NULL,
731 				    "legacy interrupt setup failed", NULL);
732 				break;
733 			}
734 		}
735 
736 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
737 			NVLOG(NVDBG_INIT, nvc, NULL,
738 			    "no interrupts registered", NULL);
739 			break;
740 		}
741 
742 #ifdef SGPIO_SUPPORT
743 		/*
744 		 * save off the controller number
745 		 */
746 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
747 		    "reg", (caddr_t)&regs, &rlen);
748 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
749 		kmem_free(regs, rlen);
750 
751 		/*
752 		 * initialize SGPIO
753 		 */
754 		nv_sgp_led_init(nvc, pci_conf_handle);
755 #endif	/* SGPIO_SUPPORT */
756 
757 		/*
758 		 * Do initial reset so that signature can be gathered
759 		 */
760 		for (j = 0; j < NV_NUM_PORTS; j++) {
761 			ddi_acc_handle_t bar5_hdl;
762 			uint32_t sstatus;
763 			nv_port_t *nvp;
764 
765 			nvp = &(nvc->nvc_port[j]);
766 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
767 			sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
768 
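			/*
			 * Only reset ports with a device present and PHY
			 * communication established; the reset allows the
			 * device signature to be read.
			 */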
769 			if (SSTATUS_GET_DET(sstatus) ==
770 			    SSTATUS_DET_DEVPRE_PHYCOM) {
771 
772 				nvp->nvp_state |= NV_ATTACH;
773 				nvp->nvp_type = SATA_DTYPE_UNKNOWN;
774 				mutex_enter(&nvp->nvp_mutex);
775 				nv_reset(nvp, "attach");
776 
777 				while (nvp->nvp_state & NV_RESET) {
778 					cv_wait(&nvp->nvp_reset_cv,
779 					    &nvp->nvp_mutex);
780 				}
781 
782 				mutex_exit(&nvp->nvp_mutex);
783 			}
784 		}
785 
786 		/*
787 		 * attach to sata module
788 		 */
789 		if (sata_hba_attach(nvc->nvc_dip,
790 		    &nvc->nvc_sata_hba_tran,
791 		    DDI_ATTACH) != DDI_SUCCESS) {
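			/*
			 * Setting this progress flag here causes the failure
			 * path below to cancel any port timers that may be
			 * running.
			 */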
792 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
793 
794 			break;
795 		}
796 
797 		pci_config_teardown(&pci_conf_handle);
798 
799 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
800 
801 		return (DDI_SUCCESS);
802 
803 	case DDI_RESUME:
804 
805 		nvc = ddi_get_soft_state(nv_statep, inst);
806 
807 		NVLOG(NVDBG_INIT, nvc, NULL,
808 		    "nv_attach(): DDI_RESUME inst %d", inst);
809 
810 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
811 			return (DDI_FAILURE);
812 		}
813 
814 		/*
815 		 * Set the PCI command register: enable IO/MEM/Master.
816 		 */
817 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
818 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
819 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
820 
821 		/*
822 		 * Need to set bit 2 to 1 at config offset 0x50
823 		 * to enable access to the bar5 registers.
824 		 */
825 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
826 
827 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
828 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
829 			    reg32 | NV_BAR5_SPACE_EN);
830 		}
831 
832 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
833 
834 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
835 			nv_resume(&(nvc->nvc_port[i]));
836 		}
837 
838 		pci_config_teardown(&pci_conf_handle);
839 
840 		return (DDI_SUCCESS);
841 
842 	default:
843 		return (DDI_FAILURE);
844 	}
845 
846 
847 	/*
848 	 * DDI_ATTACH failure path starts here
849 	 */
850 
851 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
852 		nv_rem_intrs(nvc);
853 	}
854 
855 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
856 		/*
857 		 * Remove timers
858 		 */
859 		int port = 0;
860 		nv_port_t *nvp;
861 
862 		for (; port < NV_MAX_PORTS(nvc); port++) {
863 			nvp = &(nvc->nvc_port[port]);
864 			if (nvp->nvp_timeout_id != 0) {
865 				(void) untimeout(nvp->nvp_timeout_id);
866 			}
867 		}
868 	}
869 
870 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
871 		mutex_destroy(&nvc->nvc_mutex);
872 	}
873 
874 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
875 		nv_uninit_ctl(nvc);
876 	}
877 
878 	if (attach_state & ATTACH_PROGRESS_BARS) {
879 		while (--bar >= 0) {
880 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
881 		}
882 	}
883 
884 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
885 		ddi_soft_state_free(nv_statep, inst);
886 	}
887 
888 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
889 		pci_config_teardown(&pci_conf_handle);
890 	}
891 
892 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
893 
894 	return (DDI_FAILURE);
895 }
896 
897 
898 static int
899 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
900 {
901 	int i, port, inst = ddi_get_instance(dip);
902 	nv_ctl_t *nvc;
903 	nv_port_t *nvp;
904 
905 	nvc = ddi_get_soft_state(nv_statep, inst);
906 
907 	switch (cmd) {
908 
909 	case DDI_DETACH:
910 
911 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);
912 
913 		/*
914 		 * Remove interrupts
915 		 */
916 		nv_rem_intrs(nvc);
917 
918 		/*
919 		 * Remove timers
920 		 */
921 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
922 			nvp = &(nvc->nvc_port[port]);
923 			if (nvp->nvp_timeout_id != 0) {
924 				(void) untimeout(nvp->nvp_timeout_id);
925 			}
926 		}
927 
928 		/*
929 		 * Remove maps
930 		 */
931 		for (i = 0; i < 6; i++) {
932 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
933 		}
934 
935 		/*
936 		 * Destroy mutexes
937 		 */
938 		mutex_destroy(&nvc->nvc_mutex);
939 
940 		/*
941 		 * Uninitialize the controller structures
942 		 */
943 		nv_uninit_ctl(nvc);
944 
945 #ifdef SGPIO_SUPPORT
946 		/*
947 		 * release SGPIO resources
948 		 */
949 		nv_sgp_cleanup(nvc);
950 #endif
951 
952 		/*
953 		 * unregister from the sata module
954 		 */
955 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
956 
957 		/*
958 		 * Free soft state
959 		 */
960 		ddi_soft_state_free(nv_statep, inst);
961 
962 		return (DDI_SUCCESS);
963 
964 	case DDI_SUSPEND:
965 
966 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);
967 
968 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
969 			nv_suspend(&(nvc->nvc_port[i]));
970 		}
971 
972 		nvc->nvc_state |= NV_CTRL_SUSPEND;
973 
974 		return (DDI_SUCCESS);
975 
976 	default:
977 		return (DDI_FAILURE);
978 	}
979 }
980 
981 
982 /*ARGSUSED*/
983 static int
984 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
985 {
986 	nv_ctl_t *nvc;
987 	int instance;
988 	dev_t dev;
989 
990 	dev = (dev_t)arg;
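	/* the minor number is the controller instance number */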
991 	instance = getminor(dev);
992 
993 	switch (infocmd) {
994 	case DDI_INFO_DEVT2DEVINFO:
995 		nvc = ddi_get_soft_state(nv_statep,  instance);
996 		if (nvc != NULL) {
997 			*result = nvc->nvc_dip;
998 			return (DDI_SUCCESS);
999 		} else {
1000 			*result = NULL;
1001 			return (DDI_FAILURE);
1002 		}
1003 	case DDI_INFO_DEVT2INSTANCE:
1004 		*(int *)result = instance;
1005 		break;
1006 	default:
1007 		break;
1008 	}
1009 	return (DDI_SUCCESS);
1010 }
1011 
1012 
1013 #ifdef SGPIO_SUPPORT
1014 /* ARGSUSED */
1015 static int
1016 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1017 {
1018 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1019 
1020 	if (nvc == NULL) {
1021 		return (ENXIO);
1022 	}
1023 
1024 	return (0);
1025 }
1026 
1027 
1028 /* ARGSUSED */
1029 static int
1030 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1031 {
1032 	return (0);
1033 }
1034 
1035 
1036 /* ARGSUSED */
1037 static int
1038 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1039 {
1040 	nv_ctl_t *nvc;
1041 	int inst;
1042 	int status;
1043 	int ctlr, port;
1044 	int drive;
1045 	uint8_t curr_led;
1046 	struct dc_led_ctl led;
1047 
1048 	inst = getminor(dev);
1049 	if (inst == -1) {
1050 		return (EBADF);
1051 	}
1052 
1053 	nvc = ddi_get_soft_state(nv_statep, inst);
1054 	if (nvc == NULL) {
1055 		return (EBADF);
1056 	}
1057 
1058 	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1059 		return (EIO);
1060 	}
1061 
1062 	switch (cmd) {
1063 	case DEVCTL_SET_LED:
1064 		status = ddi_copyin((void *)arg, &led,
1065 		    sizeof (struct dc_led_ctl), mode);
1066 		if (status != 0)
1067 			return (EFAULT);
1068 
1069 		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per the NVIDIA docs), this code does as well.
		 * Note that this validates the port value within led_state
		 * as well.
1074 		 */
1075 
1076 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1077 		if ((ctlr != 0) && (ctlr != 1))
1078 			return (ENXIO);
1079 
1080 		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1081 		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1082 			return (EINVAL);
1083 		}
1084 
1085 		drive = led.led_number;
1086 
1087 		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1088 		    (led.led_state == DCL_STATE_OFF)) {
1089 
1090 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1091 				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1092 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1093 				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1094 			} else {
1095 				return (ENXIO);
1096 			}
1097 
1098 			port = SGP_DRV_TO_PORT(led.led_number);
1099 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1100 		}
1101 
1102 		if (led.led_ctl_active == DCL_CNTRL_ON) {
1103 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1104 				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1105 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1106 				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1107 			} else {
1108 				return (ENXIO);
1109 			}
1110 
1111 			port = SGP_DRV_TO_PORT(led.led_number);
1112 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1113 		}
1114 
1115 		break;
1116 
1117 	case DEVCTL_GET_LED:
1118 		status = ddi_copyin((void *)arg, &led,
1119 		    sizeof (struct dc_led_ctl), mode);
1120 		if (status != 0)
1121 			return (EFAULT);
1122 
1123 		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per the NVIDIA docs), this code does as well.
		 * Note that this validates the port value within led_state
		 * as well.
1128 		 */
1129 
1130 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1131 		if ((ctlr != 0) && (ctlr != 1))
1132 			return (ENXIO);
1133 
1134 		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1135 		    led.led_number);
1136 
1137 		port = SGP_DRV_TO_PORT(led.led_number);
1138 		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1139 			led.led_ctl_active = DCL_CNTRL_ON;
1140 
1141 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1142 				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1143 					led.led_state = DCL_STATE_OFF;
1144 				else
1145 					led.led_state = DCL_STATE_ON;
1146 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1147 				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1148 					led.led_state = DCL_STATE_OFF;
1149 				else
1150 					led.led_state = DCL_STATE_ON;
1151 			} else {
1152 				return (ENXIO);
1153 			}
1154 		} else {
1155 			led.led_ctl_active = DCL_CNTRL_OFF;
1156 			/*
			 * Not really off, but it was never set and there is
			 * no constant for the tri-state.
1159 			 */
1160 			led.led_state = DCL_STATE_OFF;
1161 		}
1162 
1163 		status = ddi_copyout(&led, (void *)arg,
1164 		    sizeof (struct dc_led_ctl), mode);
1165 		if (status != 0)
1166 			return (EFAULT);
1167 
1168 		break;
1169 
1170 	case DEVCTL_NUM_LEDS:
1171 		led.led_number = SGPIO_DRV_CNT_VALUE;
1172 		led.led_ctl_active = 1;
1173 		led.led_type = 3;
1174 
1175 		/*
1176 		 * According to documentation, NVIDIA SGPIO is supposed to
1177 		 * support blinking, but it does not seem to work in practice.
1178 		 */
1179 		led.led_state = DCL_STATE_ON;
1180 
1181 		status = ddi_copyout(&led, (void *)arg,
1182 		    sizeof (struct dc_led_ctl), mode);
1183 		if (status != 0)
1184 			return (EFAULT);
1185 
1186 		break;
1187 
1188 	default:
1189 		return (EINVAL);
1190 	}
1191 
1192 	return (0);
1193 }
1194 #endif	/* SGPIO_SUPPORT */
1195 
1196 
1197 /*
1198  * Called by sata module to probe a port.  Port and device state
1199  * are not changed here... only reported back to the sata module.
1200  *
1201  */
1202 static int
1203 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1204 {
1205 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1206 	uint8_t cport = sd->satadev_addr.cport;
1207 	uint8_t pmport = sd->satadev_addr.pmport;
1208 	uint8_t qual = sd->satadev_addr.qual;
1209 	uint8_t det;
1210 
1211 	nv_port_t *nvp;
1212 
1213 	if (cport >= NV_MAX_PORTS(nvc)) {
1214 		sd->satadev_type = SATA_DTYPE_NONE;
1215 		sd->satadev_state = SATA_STATE_UNKNOWN;
1216 
1217 		return (SATA_FAILURE);
1218 	}
1219 
1220 	ASSERT(nvc->nvc_port != NULL);
1221 	nvp = &(nvc->nvc_port[cport]);
1222 	ASSERT(nvp != NULL);
1223 
1224 	NVLOG(NVDBG_ENTRY, nvc, nvp,
1225 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1226 	    "qual: 0x%x", cport, pmport, qual);
1227 
1228 	mutex_enter(&nvp->nvp_mutex);
1229 
1230 	/*
1231 	 * This check seems to be done in the SATA module.
1232 	 * It may not be required here
1233 	 */
1234 	if (nvp->nvp_state & NV_DEACTIVATED) {
1235 		nv_cmn_err(CE_WARN, nvc, nvp,
1236 		    "port inactive.  Use cfgadm to activate");
1237 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1238 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1239 		mutex_exit(&nvp->nvp_mutex);
1240 
1241 		return (SATA_SUCCESS);
1242 	}
1243 
1244 	if (nvp->nvp_state & NV_FAILED) {
1245 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
1246 		    "probe: port failed", NULL);
1247 		sd->satadev_type = nvp->nvp_type;
1248 		sd->satadev_state = SATA_PSTATE_FAILED;
1249 		mutex_exit(&nvp->nvp_mutex);
1250 
1251 		return (SATA_SUCCESS);
1252 	}
1253 
1254 	if (qual == SATA_ADDR_PMPORT) {
1255 		sd->satadev_type = SATA_DTYPE_NONE;
1256 		sd->satadev_state = SATA_STATE_UNKNOWN;
1257 		mutex_exit(&nvp->nvp_mutex);
1258 		nv_cmn_err(CE_WARN, nvc, nvp,
1259 		    "controller does not support port multiplier");
1260 
1261 		return (SATA_SUCCESS);
1262 	}
1263 
1264 	sd->satadev_state = SATA_PSTATE_PWRON;
1265 
1266 	nv_copy_registers(nvp, sd, NULL);
1267 
1268 	if (nvp->nvp_state & (NV_RESET|NV_LINK_EVENT)) {
1269 		/*
1270 		 * during a reset or link event, fake the status
1271 		 * as it may be changing as a result of the reset
1272 		 * or link event.
1273 		 */
1274 		DTRACE_PROBE(state_reset_link_event_faking_status_p);
1275 		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
1276 
1277 		SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1278 		    SSTATUS_IPM_ACTIVE);
1279 		SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1280 		    SSTATUS_DET_DEVPRE_PHYCOM);
1281 		sd->satadev_type = nvp->nvp_type;
1282 		mutex_exit(&nvp->nvp_mutex);
1283 
1284 		return (SATA_SUCCESS);
1285 	}
1286 
1287 	det = SSTATUS_GET_DET(sd->satadev_scr.sstatus);
1288 
1289 	/*
1290 	 * determine link status
1291 	 */
1292 	if (det != SSTATUS_DET_DEVPRE_PHYCOM) {
1293 		switch (det) {
1294 
1295 		case SSTATUS_DET_NODEV:
1296 		case SSTATUS_DET_PHYOFFLINE:
1297 			sd->satadev_type = SATA_DTYPE_NONE;
1298 			break;
1299 
1300 		default:
1301 			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1302 			break;
1303 		}
1304 
1305 		mutex_exit(&nvp->nvp_mutex);
1306 
1307 		return (SATA_SUCCESS);
1308 	}
1309 
1310 	/*
1311 	 * Just report the current port state
1312 	 */
1313 	sd->satadev_type = nvp->nvp_type;
1314 	DTRACE_PROBE1(nvp_type_h, int, nvp->nvp_type);
1315 
1316 	mutex_exit(&nvp->nvp_mutex);
1317 
1318 	return (SATA_SUCCESS);
1319 }
1320 
1321 
1322 /*
1323  * Called by sata module to start a new command.
1324  */
1325 static int
1326 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1327 {
1328 	int cport = spkt->satapkt_device.satadev_addr.cport;
1329 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1330 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1331 	int ret;
1332 
1333 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1334 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);
1335 
1336 	mutex_enter(&nvp->nvp_mutex);
1337 
1338 	if (nvp->nvp_state & NV_DEACTIVATED) {
1339 
1340 		NVLOG(NVDBG_ERRS, nvc, nvp,
1341 		    "nv_sata_start: NV_DEACTIVATED", NULL);
1342 		DTRACE_PROBE(nvp_state_inactive_p);
1343 
1344 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1345 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1346 		mutex_exit(&nvp->nvp_mutex);
1347 
1348 		return (SATA_TRAN_PORT_ERROR);
1349 	}
1350 
1351 	if (nvp->nvp_state & NV_FAILED) {
1352 
1353 		NVLOG(NVDBG_ERRS, nvc, nvp,
1354 		    "nv_sata_start: NV_FAILED state", NULL);
1355 		DTRACE_PROBE(nvp_state_failed_p);
1356 
1357 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1358 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1359 		mutex_exit(&nvp->nvp_mutex);
1360 
1361 		return (SATA_TRAN_PORT_ERROR);
1362 	}
1363 
1364 	if (nvp->nvp_state & NV_RESET) {
1365 
1366 		NVLOG(NVDBG_ERRS, nvc, nvp,
1367 		    "still waiting for reset completion", NULL);
1368 		DTRACE_PROBE(nvp_state_reset_p);
1369 
1370 		spkt->satapkt_reason = SATA_PKT_BUSY;
1371 
1372 		/*
1373 		 * If in panic, timeouts do not occur, so invoke
1374 		 * reset handling directly so that the signature
1375 		 * can be acquired to complete the reset handling.
1376 		 */
1377 		if (ddi_in_panic()) {
1378 			NVLOG(NVDBG_ERRS, nvc, nvp,
1379 			    "nv_sata_start: calling nv_monitor_reset "
1380 			    "synchronously", NULL);
1381 
1382 			(void) nv_monitor_reset(nvp);
1383 		}
1384 
1385 		mutex_exit(&nvp->nvp_mutex);
1386 
1387 		return (SATA_TRAN_BUSY);
1388 	}
1389 
1390 	if (nvp->nvp_state & NV_LINK_EVENT) {
1391 
1392 		NVLOG(NVDBG_ERRS, nvc, nvp,
1393 		    "nv_sata_start(): link event ret bsy", NULL);
1394 		DTRACE_PROBE(nvp_state_link_event_p);
1395 
1396 		spkt->satapkt_reason = SATA_PKT_BUSY;
1397 
1398 		if (ddi_in_panic()) {
1399 			NVLOG(NVDBG_ERRS, nvc, nvp,
1400 			    "nv_sata_start: calling nv_timeout "
1401 			    "synchronously", NULL);
1402 
1403 			nv_timeout(nvp);
1404 		}
1405 
1406 		mutex_exit(&nvp->nvp_mutex);
1407 
1408 		return (SATA_TRAN_BUSY);
1409 	}
1410 
1411 
1412 	if ((nvp->nvp_type == SATA_DTYPE_NONE) ||
1413 	    (nvp->nvp_type == SATA_DTYPE_UNKNOWN)) {
1414 
1415 		NVLOG(NVDBG_ERRS, nvc, nvp,
1416 		    "nv_sata_start: nvp_type 0x%x", nvp->nvp_type);
1417 		DTRACE_PROBE1(not_ready_nvp_type_h, int, nvp->nvp_type);
1418 
1419 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1420 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1421 		mutex_exit(&nvp->nvp_mutex);
1422 
1423 		return (SATA_TRAN_PORT_ERROR);
1424 	}
1425 
1426 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1427 
1428 		nv_cmn_err(CE_WARN, nvc, nvp,
1429 		    "port multiplier not supported by controller");
1430 
1431 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1432 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1433 		mutex_exit(&nvp->nvp_mutex);
1434 
1435 		return (SATA_TRAN_CMD_UNSUPPORTED);
1436 	}
1437 
1438 	/*
	 * After a device reset, once the sata module's restore processing
	 * is complete, the sata module sets sata_clear_dev_reset to
	 * indicate that restore processing has completed and normal,
	 * non-restore commands should be processed.
1443 	 */
1444 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1445 
1446 		NVLOG(NVDBG_RESET, nvc, nvp,
1447 		    "nv_sata_start: clearing NV_RESTORE", NULL);
1448 		DTRACE_PROBE(clearing_restore_p);
1449 		DTRACE_PROBE1(nvp_state_before_clear_h, int, nvp->nvp_state);
1450 
1451 		nvp->nvp_state &= ~NV_RESTORE;
1452 	}
1453 
1454 	/*
	 * If the device was recently reset, as indicated by NV_RESTORE,
	 * only allow commands which restore device state.  The sata module
	 * marks such commands with sata_ignore_dev_reset.
	 *
	 * During a coredump, nv_reset() is called but the restore isn't
	 * processed, so ignore the wait for restore if the system
	 * is panicking.
1462 	 */
1463 	if ((nvp->nvp_state & NV_RESTORE) &&
1464 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1465 	    (ddi_in_panic() == 0)) {
1466 
1467 		NVLOG(NVDBG_RESET, nvc, nvp,
1468 		    "nv_sata_start: waiting for restore ", NULL);
1469 		DTRACE_PROBE1(restore_no_ignore_reset_nvp_state_h,
1470 		    int, nvp->nvp_state);
1471 
1472 		spkt->satapkt_reason = SATA_PKT_BUSY;
1473 		mutex_exit(&nvp->nvp_mutex);
1474 
1475 		return (SATA_TRAN_BUSY);
1476 	}
1477 
1478 	if (nvp->nvp_state & NV_ABORTING) {
1479 
1480 		NVLOG(NVDBG_ERRS, nvc, nvp,
1481 		    "nv_sata_start: NV_ABORTING", NULL);
1482 		DTRACE_PROBE1(aborting_nvp_state_h, int, nvp->nvp_state);
1483 
1484 		spkt->satapkt_reason = SATA_PKT_BUSY;
1485 		mutex_exit(&nvp->nvp_mutex);
1486 
1487 		return (SATA_TRAN_BUSY);
1488 	}
1489 
1490 	/*
1491 	 * record command sequence for debugging.
1492 	 */
1493 	nvp->nvp_seq++;
1494 
1495 	DTRACE_PROBE2(command_start, int *, nvp, int,
1496 	    spkt->satapkt_cmd.satacmd_cmd_reg);
1497 
1498 	/*
	 * clear SError so that errors can be checked after a command failure
1500 	 */
1501 	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1502 
1503 	if (spkt->satapkt_op_mode &
1504 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1505 
1506 		ret = nv_start_sync(nvp, spkt);
1507 
1508 		mutex_exit(&nvp->nvp_mutex);
1509 
1510 		return (ret);
1511 	}
1512 
1513 	/*
	 * start the command asynchronously
1515 	 */
1516 	ret = nv_start_async(nvp, spkt);
1517 
1518 	mutex_exit(&nvp->nvp_mutex);
1519 
1520 	return (ret);
1521 }
1522 
1523 
1524 /*
1525  * SATA_OPMODE_POLLING implies the driver is in a
1526  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1527  * If only SATA_OPMODE_SYNCH is set, the driver can use
1528  * interrupts and sleep wait on a cv.
1529  *
1530  * If SATA_OPMODE_POLLING is set, the driver can't use
1531  * interrupts and must busy wait and simulate the
1532  * interrupts by waiting for BSY to be cleared.
1533  *
1534  * Synchronous mode has to return BUSY if there are
1535  * any other commands already on the drive.
1536  */
1537 static int
1538 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1539 {
1540 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1541 	int ret;
1542 
1543 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1544 	    NULL);
1545 
1546 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1547 		spkt->satapkt_reason = SATA_PKT_BUSY;
		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
		    "ncq_run: %d non_ncq_run: %d spkt: %p",
1551 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1552 		    (&(nvp->nvp_slot[0]))->nvslot_spkt);
1553 
1554 		return (SATA_TRAN_BUSY);
1555 	}
1556 
1557 	/*
1558 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1559 	 */
1560 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1561 	    servicing_interrupt()) {
1562 		spkt->satapkt_reason = SATA_PKT_BUSY;
1563 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1564 		    "SYNC mode not allowed during interrupt", NULL);
1565 
1566 		return (SATA_TRAN_BUSY);
1567 
1568 	}
1569 
1570 	/*
1571 	 * disable interrupt generation if in polled mode
1572 	 */
1573 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1574 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1575 	}
1576 
1577 	/*
1578 	 * overload the satapkt_reason with BUSY so code below
1579 	 * will know when it's done
1580 	 */
1581 	spkt->satapkt_reason = SATA_PKT_BUSY;
1582 
1583 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1584 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1585 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1586 		}
1587 
1588 		return (ret);
1589 	}
1590 
1591 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1592 		mutex_exit(&nvp->nvp_mutex);
1593 		ret = nv_poll_wait(nvp, spkt);
1594 		mutex_enter(&nvp->nvp_mutex);
1595 
1596 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1597 
		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
		    " done, reason %d", ret);
1600 
1601 		return (ret);
1602 	}
1603 
1604 	/*
1605 	 * non-polling synchronous mode handling.  The interrupt will signal
1606 	 * when device IO is completed.
1607 	 */
1608 	while (spkt->satapkt_reason == SATA_PKT_BUSY) {
1609 		cv_wait(&nvp->nvp_sync_cv, &nvp->nvp_mutex);
1610 	}
1611 
1612 
	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
	    " done, reason %d", spkt->satapkt_reason);
1615 
1616 	return (SATA_TRAN_ACCEPTED);
1617 }
1618 
1619 
1620 static int
1621 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1622 {
1623 	int ret;
1624 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1625 #if ! defined(__lock_lint)
1626 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1627 #endif
1628 
1629 	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);
1630 
1631 	for (;;) {
1632 
1633 		NV_DELAY_NSEC(400);
1634 
1635 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
1636 		    NULL);
1637 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1638 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1639 			mutex_enter(&nvp->nvp_mutex);
1640 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1641 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1642 			nv_reset(nvp, "poll_wait");
1643 			nv_complete_io(nvp, spkt, 0);
1644 			mutex_exit(&nvp->nvp_mutex);
1645 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1646 			    "SATA_STATUS_BSY", NULL);
1647 
1648 			return (SATA_TRAN_ACCEPTED);
1649 		}
1650 
1651 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
1652 		    NULL);
1653 
1654 		/*
1655 		 * Simulate interrupt.
1656 		 */
1657 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1658 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
1659 		    NULL);
1660 
1661 		if (ret != DDI_INTR_CLAIMED) {
1662 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1663 			    " unclaimed -- resetting", NULL);
1664 			mutex_enter(&nvp->nvp_mutex);
1665 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1666 			nv_reset(nvp, "poll_wait intr not claimed");
1667 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1668 			nv_complete_io(nvp, spkt, 0);
1669 			mutex_exit(&nvp->nvp_mutex);
1670 
1671 			return (SATA_TRAN_ACCEPTED);
1672 		}
1673 
1674 #if ! defined(__lock_lint)
1675 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1676 			/*
1677 			 * packet is complete
1678 			 */
1679 			return (SATA_TRAN_ACCEPTED);
1680 		}
1681 #endif
1682 	}
1683 	/*NOTREACHED*/
1684 }
1685 
1686 
1687 /*
1688  * Called by sata module to abort outstanding packets.
1689  */
1690 /*ARGSUSED*/
1691 static int
1692 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1693 {
1694 	int cport = spkt->satapkt_device.satadev_addr.cport;
1695 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1696 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1697 	int c_a, ret;
1698 
1699 	ASSERT(cport < NV_MAX_PORTS(nvc));
1700 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1701 
1702 	mutex_enter(&nvp->nvp_mutex);
1703 
1704 	if (nvp->nvp_state & NV_DEACTIVATED) {
1705 		mutex_exit(&nvp->nvp_mutex);
1706 		nv_cmn_err(CE_WARN, nvc, nvp,
1707 		    "abort request failed: port inactive");
1708 
1709 		return (SATA_FAILURE);
1710 	}
1711 
1712 	/*
	 * if spkt == NULL then abort all commands
1714 	 */
1715 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1716 
1717 	if (c_a) {
1718 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1719 		    "packets aborted running=%d", c_a);
1720 		ret = SATA_SUCCESS;
1721 	} else {
1722 		if (spkt == NULL) {
1723 			NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1724 		} else {
1725 			NVLOG(NVDBG_ENTRY, nvc, nvp,
1726 			    "can't find spkt to abort", NULL);
1727 		}
1728 		ret = SATA_FAILURE;
1729 	}
1730 
1731 	mutex_exit(&nvp->nvp_mutex);
1732 
1733 	return (ret);
1734 }
1735 
1736 
1737 /*
 * If spkt == NULL, abort all packets running; otherwise
 * abort the requested packet.  Must be called with nvp_mutex
 * held and returns with it held.  Not NCQ aware.
1741  */
1742 static int
1743 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
1744     boolean_t reset)
1745 {
1746 	int aborted = 0, i, reset_once = B_FALSE;
1747 	struct nv_slot *nv_slotp;
1748 	sata_pkt_t *spkt_slot;
1749 
1750 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1751 
1752 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);
1753 
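	/*
	 * While NV_ABORTING is set, nv_sata_start() rejects new commands
	 * with SATA_TRAN_BUSY.
	 */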
1754 	nvp->nvp_state |= NV_ABORTING;
1755 
1756 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1757 
1758 		nv_slotp = &(nvp->nvp_slot[i]);
1759 		spkt_slot = nv_slotp->nvslot_spkt;
1760 
1761 		/*
1762 		 * skip if not active command in slot
1763 		 */
1764 		if (spkt_slot == NULL) {
1765 			continue;
1766 		}
1767 
1768 		/*
1769 		 * if a specific packet was requested, skip if
1770 		 * this is not a match
1771 		 */
1772 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1773 			continue;
1774 		}
1775 
1776 		/*
1777 		 * stop the hardware.  This could need reworking
1778 		 * when NCQ is enabled in the driver.
1779 		 */
1780 		if (reset_once == B_FALSE) {
1781 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1782 
1783 			/*
1784 			 * stop DMA engine
1785 			 */
1786 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1787 
1788 			/*
1789 			 * Reset only if explicitly specified by the arg reset
1790 			 */
1791 			if (reset == B_TRUE) {
1792 				reset_once = B_TRUE;
1793 				nv_reset(nvp, "abort_active");
1794 			}
1795 		}
1796 
1797 		spkt_slot->satapkt_reason = abort_reason;
1798 		nv_complete_io(nvp, spkt_slot, i);
1799 		aborted++;
1800 	}
1801 
1802 	nvp->nvp_state &= ~NV_ABORTING;
1803 
1804 	return (aborted);
1805 }
1806 
1807 
1808 /*
1809  * Called by sata module to reset a port, device, or the controller.
1810  */
1811 static int
1812 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1813 {
1814 	int cport = sd->satadev_addr.cport;
1815 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1816 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1817 	int ret = SATA_FAILURE;
1818 
1819 	ASSERT(cport < NV_MAX_PORTS(nvc));
1820 
1821 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);
1822 
1823 	mutex_enter(&nvp->nvp_mutex);
1824 
1825 	switch (sd->satadev_addr.qual) {
1826 
1827 	case SATA_ADDR_CPORT:
1828 		/*FALLTHROUGH*/
1829 	case SATA_ADDR_DCPORT:
1830 
1831 		ret = SATA_SUCCESS;
1832 
1833 		/*
1834 		 * If a reset is already in progress, don't disturb it
1835 		 */
1836 		if ((nvp->nvp_state & (NV_RESET|NV_RESTORE)) &&
1837 		    (ddi_in_panic() == 0)) {
1838 			NVLOG(NVDBG_RESET, nvc, nvp,
1839 			    "nv_sata_reset: reset already in progress", NULL);
1840 			DTRACE_PROBE(reset_already_in_progress_p);
1841 
1842 			break;
1843 		}
1844 
1845 		/*
1846 		 * log the pre-reset state of the driver because dumping the
1847 		 * blocks will disturb it.
1848 		 */
1849 		if (ddi_in_panic() == 1) {
1850 			NVLOG(NVDBG_RESET, nvc, nvp, "in_panic.  nvp_state: "
1851 			    "0x%x nvp_reset_time: %d nvp_last_cmd: 0x%x "
1852 			    "nvp_previous_cmd: 0x%x nvp_reset_count: %d "
1853 			    "nvp_first_reset_reason: %s "
1854 			    "nvp_reset_reason: %s nvp_seq: %d "
1855 			    "in_interrupt: %d", nvp->nvp_state,
1856 			    nvp->nvp_reset_time, nvp->nvp_last_cmd,
1857 			    nvp->nvp_previous_cmd, nvp->nvp_reset_count,
1858 			    nvp->nvp_first_reset_reason,
1859 			    nvp->nvp_reset_reason, nvp->nvp_seq,
1860 			    servicing_interrupt());
1861 		}
1862 
1863 		nv_reset(nvp, "sata_reset");
1864 
1865 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1866 
1867 		/*
1868 		 * If the port is inactive, do a quiet reset and don't attempt
1869 		 * to wait for reset completion or do any post reset processing
1870 		 *
1871 		 */
1872 		if (nvp->nvp_state & NV_DEACTIVATED) {
1873 			nvp->nvp_state &= ~NV_RESET;
1874 			nvp->nvp_reset_time = 0;
1875 
1876 			break;
1877 		}
1878 
1879 		/*
1880 		 * clear the port failed flag.  It will get set again
1881 		 * if the port is still not functioning.
1882 		 */
1883 		nvp->nvp_state &= ~NV_FAILED;
1884 
1885 		/*
1886 		 * timeouts are not available while the system is
1887 		 * dropping core, so call nv_monitor_reset() directly
1888 		 */
1889 		if (ddi_in_panic() != 0) {
1890 			while (nvp->nvp_state & NV_RESET) {
1891 				drv_usecwait(1000);
1892 				(void) nv_monitor_reset(nvp);
1893 			}
1894 
1895 			break;
1896 		}
1897 
1898 		break;
1899 	case SATA_ADDR_CNTRL:
1900 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1901 		    "nv_sata_reset: controller reset not supported", NULL);
1902 
1903 		break;
1904 	case SATA_ADDR_PMPORT:
1905 	case SATA_ADDR_DPMPORT:
1906 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1907 		    "nv_sata_reset: port multipliers not supported", NULL);
1908 		/*FALLTHROUGH*/
1909 	default:
1910 		/*
1911 		 * unsupported case
1912 		 */
1913 		break;
1914 	}
1915 
1916 	mutex_exit(&nvp->nvp_mutex);
1917 
1918 	return (ret);
1919 }
1920 
1921 
1922 /*
1923  * Sata entry point to handle port activation.  cfgadm -c connect
1924  */
1925 static int
1926 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1927 {
1928 	int cport = sd->satadev_addr.cport;
1929 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1930 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1931 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1932 	uint32_t sstatus;
1933 
1934 	ASSERT(cport < NV_MAX_PORTS(nvc));
1935 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1936 
1937 	mutex_enter(&nvp->nvp_mutex);
1938 
1939 	sd->satadev_state = SATA_STATE_READY;
1940 
1941 	nv_copy_registers(nvp, sd, NULL);
1942 
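	/*
	 * enable interrupts on this port (they are disabled by
	 * nv_sata_deactivate())
	 */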
1943 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1944 
1945 	/*
1946 	 * initiate link probing and device signature acquisition
1947 	 */
1948 
1949 	bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1950 
1951 	sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
1952 
1953 	nvp->nvp_type = SATA_DTYPE_NONE;
1954 	nvp->nvp_signature = NV_NO_SIG;
1955 	nvp->nvp_state &= ~NV_DEACTIVATED;
1956 
1957 	if (SSTATUS_GET_DET(sstatus) ==
1958 	    SSTATUS_DET_DEVPRE_PHYCOM) {
1959 
1960 		nvp->nvp_state |= NV_ATTACH;
1961 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1962 		nv_reset(nvp, "sata_activate");
1963 
1964 		while (nvp->nvp_state & NV_RESET) {
1965 			cv_wait(&nvp->nvp_reset_cv, &nvp->nvp_mutex);
1966 		}
1967 
1968 	}
1969 
1970 	mutex_exit(&nvp->nvp_mutex);
1971 
1972 	return (SATA_SUCCESS);
1973 }
1974 
1975 
1976 /*
1977  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1978  */
1979 static int
1980 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1981 {
1982 	int cport = sd->satadev_addr.cport;
1983 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1984 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1985 
1986 	ASSERT(cport < NV_MAX_PORTS(nvc));
1987 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);
1988 
1989 	mutex_enter(&nvp->nvp_mutex);
1990 
1991 	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1992 
1993 	/*
1994 	 * make the device inaccessible
1995 	 */
1996 	nvp->nvp_state |= NV_DEACTIVATED;
1997 
1998 	/*
1999 	 * disable the interrupts on port
2000 	 */
2001 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2002 
2003 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
2004 	nv_copy_registers(nvp, sd, NULL);
2005 
2006 	mutex_exit(&nvp->nvp_mutex);
2007 
2008 	return (SATA_SUCCESS);
2009 }
2010 
2011 
2012 /*
2013  * find an empty slot in the driver's queue, increment counters,
2014  * and then invoke the appropriate PIO or DMA start routine.
2015  */
2016 static int
2017 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
2018 {
2019 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
2020 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
2021 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2022 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
2023 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2024 	nv_slot_t *nv_slotp;
2025 	boolean_t dma_cmd;
2026 
2027 	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
2028 	    sata_cmdp->satacmd_cmd_reg);
2029 
2030 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
2031 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
2032 		nvp->nvp_ncq_run++;
2033 		/*
		 * search for an empty NCQ slot.  By this point, the caller
		 * has already determined that there is room on the queue.
2037 		 */
2038 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
2039 		    on_bit <<= 1) {
2040 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
2041 				break;
2042 			}
2043 		}
2044 
2045 		/*
		 * the first empty slot found should not exceed the queue
		 * depth of the drive.  If it does, it's an error.
2048 		 */
2049 		ASSERT(slot != nvp->nvp_queue_depth);
2050 
2051 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
2052 		    nvp->nvp_sactive);
2053 		ASSERT((sactive & on_bit) == 0);
2054 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
2055 		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
2056 		    on_bit);
2057 		nvp->nvp_sactive_cache |= on_bit;
2058 
2059 		ncq = NVSLOT_NCQ;
2060 
2061 	} else {
2062 		nvp->nvp_non_ncq_run++;
2063 		slot = 0;
2064 	}
2065 
2066 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
2067 
2068 	ASSERT(nv_slotp->nvslot_spkt == NULL);
2069 
2070 	nv_slotp->nvslot_spkt = spkt;
2071 	nv_slotp->nvslot_flags = ncq;
2072 
2073 	/*
2074 	 * the sata module doesn't indicate which commands utilize the
2075 	 * DMA engine, so find out using this switch table.
2076 	 */
2077 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
2078 	case SATAC_READ_DMA_EXT:
2079 	case SATAC_WRITE_DMA_EXT:
2080 	case SATAC_WRITE_DMA:
2081 	case SATAC_READ_DMA:
2082 	case SATAC_READ_DMA_QUEUED:
2083 	case SATAC_READ_DMA_QUEUED_EXT:
2084 	case SATAC_WRITE_DMA_QUEUED:
2085 	case SATAC_WRITE_DMA_QUEUED_EXT:
2086 	case SATAC_READ_FPDMA_QUEUED:
2087 	case SATAC_WRITE_FPDMA_QUEUED:
2088 	case SATAC_DSM:
2089 		dma_cmd = B_TRUE;
2090 		break;
2091 	default:
2092 		dma_cmd = B_FALSE;
2093 	}
2094 
2095 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
2096 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "DMA command", NULL);
2097 		nv_slotp->nvslot_start = nv_start_dma;
2098 		nv_slotp->nvslot_intr = nv_intr_dma;
2099 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
2100 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "packet command", NULL);
2101 		nv_slotp->nvslot_start = nv_start_pkt_pio;
2102 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
2103 		if ((direction == SATA_DIR_READ) ||
2104 		    (direction == SATA_DIR_WRITE)) {
2105 			nv_slotp->nvslot_byte_count =
2106 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2107 			nv_slotp->nvslot_v_addr =
2108 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2109 			/*
2110 			 * Freeing DMA resources allocated by the sata common
2111 			 * module to avoid buffer overwrite (dma sync) problems
2112 			 * when the buffer is released at command completion.
2113 			 * Primarily an issue on systems with more than
2114 			 * 4GB of memory.
2115 			 */
2116 			sata_free_dma_resources(spkt);
2117 		}
2118 	} else if (direction == SATA_DIR_NODATA_XFER) {
2119 		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
2120 		nv_slotp->nvslot_start = nv_start_nodata;
2121 		nv_slotp->nvslot_intr = nv_intr_nodata;
2122 	} else if (direction == SATA_DIR_READ) {
2123 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
2124 		nv_slotp->nvslot_start = nv_start_pio_in;
2125 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2126 		nv_slotp->nvslot_byte_count =
2127 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2128 		nv_slotp->nvslot_v_addr =
2129 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2130 		/*
2131 		 * Freeing DMA resources allocated by the sata common module to
2132 		 * avoid buffer overwrite (dma sync) problems when the buffer
2133 		 * is released at command completion.  This is not an issue
2134 		 * for write because write does not update the buffer.
2135 		 * Primarily an issue on systems with more than 4GB of memory.
2136 		 */
2137 		sata_free_dma_resources(spkt);
2138 	} else if (direction == SATA_DIR_WRITE) {
2139 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
2140 		nv_slotp->nvslot_start = nv_start_pio_out;
2141 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2142 		nv_slotp->nvslot_byte_count =
2143 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2144 		nv_slotp->nvslot_v_addr =
2145 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2146 	} else {
2147 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2148 		    " %d cookies %d cmd %x",
2149 		    sata_cmdp->satacmd_flags.sata_data_direction,
2150 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2151 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2152 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2153 
2154 		goto fail;
2155 	}
2156 
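	/*
	 * dispatch the command by invoking the PIO or DMA start routine
	 * selected above
	 */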
2157 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2158 	    SATA_TRAN_ACCEPTED) {
2159 #ifdef SGPIO_SUPPORT
2160 		nv_sgp_drive_active(nvp->nvp_ctlp,
2161 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2162 #endif
2163 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2164 
2165 		/*
2166 		 * start timer if it's not already running and this packet
2167 		 * is not requesting polled mode.
2168 		 */
2169 		if ((nvp->nvp_timeout_id == 0) &&
2170 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2171 			nv_setup_timeout(nvp, NV_ONE_SEC);
2172 		}
2173 
2174 		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2175 		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2176 
2177 		return (SATA_TRAN_ACCEPTED);
2178 	}
2179 
2180 	fail:
2181 
2182 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2183 
2184 	if (ncq == NVSLOT_NCQ) {
2185 		nvp->nvp_ncq_run--;
2186 		nvp->nvp_sactive_cache &= ~on_bit;
2187 	} else {
2188 		nvp->nvp_non_ncq_run--;
2189 	}
2190 	nv_slotp->nvslot_spkt = NULL;
2191 	nv_slotp->nvslot_flags = 0;
2192 
2193 	return (ret);
2194 }
2195 
2196 
2197 /*
 * Check if the signature is ready and, if non-zero, translate
 * it into a Solaris SATA-defined device type.
2200  */
2201 static void
2202 nv_read_signature(nv_port_t *nvp)
2203 {
2204 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2205 	int retry_count = 0;
2206 
2207 	retry:
2208 
2209 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2210 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2211 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2212 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2213 
2214 	NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2215 	    "nv_read_signature: 0x%x ", nvp->nvp_signature);
2216 
2217 	switch (nvp->nvp_signature) {
2218 
2219 	case NV_DISK_SIG:
2220 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2221 		DTRACE_PROBE(signature_is_disk_device_p)
2222 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2223 
2224 		break;
2225 	case NV_ATAPI_SIG:
2226 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2227 		    "drive is an optical device", NULL);
2228 		DTRACE_PROBE(signature_is_optical_device_p)
2229 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2230 		break;
2231 	case NV_PM_SIG:
2232 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2233 		    "device is a port multiplier", NULL);
2234 		DTRACE_PROBE(signature_is_port_multiplier_p)
2235 		nvp->nvp_type = SATA_DTYPE_PMULT;
2236 		break;
2237 	case NV_NO_SIG:
2238 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2239 		    "signature not available", NULL);
2240 		DTRACE_PROBE(sig_not_available_p);
2241 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2242 		break;
2243 	default:
2244 		if (retry_count++ == 0) {
2245 			/*
2246 			 * this is a rare corner case where the controller
2247 			 * is updating the task file registers as the driver
2248 			 * is reading them.  If this happens, wait a bit and
2249 			 * retry once.
2250 			 */
2251 			NV_DELAY_NSEC(1000000);
2252 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2253 			    "invalid signature 0x%x retry once",
2254 			    nvp->nvp_signature);
2255 			DTRACE_PROBE1(signature_invalid_retry_once_h,
2256 			    int, nvp->nvp_signature);
2257 
2258 			goto retry;
2259 		}
2260 
2261 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
2262 		    "invalid signature 0x%x", nvp->nvp_signature);
2263 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2264 
2265 		break;
2266 	}
2267 }
2268 
2269 
2270 /*
 * Set up a new timeout with the specified duration in microseconds.
2272  * If microseconds is zero, no new timeout is scheduled.  Must be
2273  * called at the end of the timeout routine.
2274  */
2275 static void
2276 nv_setup_timeout(nv_port_t *nvp, clock_t microseconds)
2277 {
2278 	clock_t old_duration = nvp->nvp_timeout_duration;
2279 
2280 	if (microseconds == 0) {
2281 
2282 		return;
2283 	}
2284 
2285 	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2286 		/*
2287 		 * Since we are dropping the mutex for untimeout,
2288 		 * the timeout may be executed while we are trying to
2289 		 * untimeout and setting up a new timeout.
2290 		 * If nvp_timeout_duration is 0, then this function
2291 		 * was re-entered. Just exit.
2292 		 */
2293 		cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2294 
2295 		return;
2296 	}
2297 
2298 	nvp->nvp_timeout_duration = 0;
2299 
2300 	if (nvp->nvp_timeout_id == 0) {
2301 		/*
2302 		 * start new timer
2303 		 */
2304 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2305 		    drv_usectohz(microseconds));
2306 	} else {
2307 		/*
2308 		 * If the currently running timeout is due later than the
2309 		 * requested one, restart it with a new expiration.
		 * Our timeouts do not need to be accurate - we are just
		 * checking whether the specified time has been exceeded.
2312 		 */
2313 		if (old_duration > microseconds) {
2314 			mutex_exit(&nvp->nvp_mutex);
2315 			(void) untimeout(nvp->nvp_timeout_id);
2316 			mutex_enter(&nvp->nvp_mutex);
2317 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2318 			    drv_usectohz(microseconds));
2319 		}
2320 	}
2321 
2322 	nvp->nvp_timeout_duration = microseconds;
2323 }
2324 
2325 
2326 
2327 int nv_reset_length = NV_RESET_LENGTH;
2328 
2329 /*
2330  * Reset the port
2331  */
2332 static void
2333 nv_reset(nv_port_t *nvp, char *reason)
2334 {
2335 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2336 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2337 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2338 	uint32_t sctrl, serr, sstatus;
2339 	uint8_t bmicx;
2340 	int i, j;
2341 	boolean_t reset_success = B_FALSE;
2342 
2343 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2344 
2345 	/*
2346 	 * If the port is reset right after the controller receives
2347 	 * the DMA activate command (or possibly any other FIS),
2348 	 * controller operation freezes without any known recovery
2349 	 * procedure.  Until Nvidia advises on a recovery mechanism,
2350 	 * avoid the situation by waiting sufficiently long to
2351 	 * ensure the link is not actively transmitting any FIS.
2352 	 * 100ms was empirically determined to be large enough to
2353 	 * ensure no transaction was left in flight but not too long
2354 	 * as to cause any significant thread delay.
2355 	 */
2356 	drv_usecwait(100000);
2357 
2358 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2359 	DTRACE_PROBE1(serror_h, int, serr);
2360 
2361 	/*
2362 	 * stop DMA engine.
2363 	 */
2364 	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2365 	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2366 
2367 	/*
2368 	 * the current setting of the NV_RESET in nvp_state indicates whether
2369 	 * this is the first reset attempt or a retry.
2370 	 */
2371 	if (nvp->nvp_state & NV_RESET) {
2372 		nvp->nvp_reset_retry_count++;
2373 
2374 		NVLOG(NVDBG_RESET, nvc, nvp, "npv_reset_retry_count: %d",
2375 		    nvp->nvp_reset_retry_count);
2376 
2377 	} else {
2378 		nvp->nvp_reset_retry_count = 0;
2379 		nvp->nvp_reset_count++;
2380 		nvp->nvp_state |= NV_RESET;
2381 
2382 		NVLOG(NVDBG_RESET, nvc, nvp, "nvp_reset_count: %d reason: %s "
2383 		    "serror: 0x%x seq: %d run: %d cmd: 0x%x",
2384 		    nvp->nvp_reset_count, reason, serr, nvp->nvp_seq,
2385 		    nvp->nvp_non_ncq_run, nvp->nvp_last_cmd);
2386 	}
2387 
2388 	/*
2389 	 * a link event could have occurred slightly before excessive
2390 	 * interrupt processing invokes a reset.  Reset handling overrides
2391 	 * link event processing so it's safe to clear it here.
2392 	 */
2393 	nvp->nvp_state &= ~(NV_RESTORE|NV_LINK_EVENT);
2394 
2395 	nvp->nvp_reset_time = ddi_get_lbolt();
2396 
2397 	if ((nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) == 0) {
2398 		nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x"
2399 		    " nvp_state: 0x%x", reason, serr, nvp->nvp_state);
2400 		/*
2401 		 * keep a record of why the first reset occurred, for debugging
2402 		 */
2403 		if (nvp->nvp_first_reset_reason[0] == '\0') {
2404 			(void) strncpy(nvp->nvp_first_reset_reason,
2405 			    reason, NV_REASON_LEN);
2406 			nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2407 		}
2408 	}
2409 
2410 	(void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2411 
2412 	/*
	 * ensure there is a terminating NUL
2414 	 */
2415 	nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2416 
2417 	/*
2418 	 * Issue hardware reset; retry if necessary.
2419 	 */
2420 	for (i = 0; i < NV_COMRESET_ATTEMPTS; i++) {
2421 
2422 		/*
2423 		 * clear signature registers and the error register too
2424 		 */
2425 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2426 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2427 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2428 		nv_put8(cmdhdl, nvp->nvp_count, 0);
2429 
2430 		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2431 
2432 		/*
		 * assert reset in the PHY by writing a 1 to bit 0 of SControl
2434 		 */
2435 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2436 
2437 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2438 		    sctrl | SCONTROL_DET_COMRESET);
2439 
2440 		/* Wait at least 1ms, as required by the spec */
2441 		drv_usecwait(nv_reset_length);
2442 
2443 		serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2444 		DTRACE_PROBE1(aftercomreset_serror_h, int, serr);
2445 
2446 		/* Reset all accumulated error bits */
2447 		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2448 
2449 
2450 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2451 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2452 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2453 		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2454 
2455 		/* de-assert reset in PHY */
2456 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2457 		    sctrl & ~SCONTROL_DET_COMRESET);
2458 
2459 		/*
2460 		 * Wait up to 10ms for COMINIT to arrive, indicating that
2461 		 * the device recognized COMRESET.
2462 		 */
2463 		for (j = 0; j < 10; j++) {
2464 			drv_usecwait(NV_ONE_MSEC);
2465 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2466 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2467 			    (SSTATUS_GET_DET(sstatus) ==
2468 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2469 				reset_success = B_TRUE;
2470 				break;
2471 			}
2472 		}
2473 
2474 		if (reset_success == B_TRUE)
2475 			break;
2476 	}
2477 
2478 
2479 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2480 	DTRACE_PROBE1(last_serror_h, int, serr);
2481 
2482 	if (reset_success == B_FALSE) {
2483 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2484 		    "after %d attempts. serr: 0x%x", i, serr);
2485 	} else {
2486 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded"
2487 		    " after %dms. serr: 0x%x", TICK_TO_MSEC(ddi_get_lbolt() -
2488 		    nvp->nvp_reset_time), serr);
2489 	}
2490 
2491 	nvp->nvp_wait_sig  = NV_WAIT_SIG;
2492 	nv_setup_timeout(nvp, nvp->nvp_wait_sig);
2493 }
2494 
2495 
2496 /*
2497  * Initialize register handling specific to mcp51/mcp55
2498  */
2499 /* ARGSUSED */
2500 static void
2501 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2502 {
2503 	nv_port_t *nvp;
2504 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2505 	uint8_t off, port;
2506 
2507 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2508 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2509 
2510 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2511 		nvp = &(nvc->nvc_port[port]);
2512 		nvp->nvp_mcp5x_int_status =
2513 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2514 		nvp->nvp_mcp5x_int_ctl =
2515 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2516 
2517 		/*
2518 		 * clear any previous interrupts asserted
2519 		 */
2520 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2521 		    MCP5X_INT_CLEAR);
2522 
2523 		/*
2524 		 * These are the interrupts to accept for now.  The spec
2525 		 * says these are enable bits, but nvidia has indicated
2526 		 * these are masking bits.  Even though they may be masked
2527 		 * out to prevent asserting the main interrupt, they can
2528 		 * still be asserted while reading the interrupt status
2529 		 * register, so that needs to be considered in the interrupt
2530 		 * handler.
2531 		 */
2532 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2533 		    ~(MCP5X_INT_IGNORE));
2534 	}
2535 
2536 	/*
2537 	 * Allow the driver to program the BM on the first command instead
2538 	 * of waiting for an interrupt.
2539 	 */
#ifdef NCQ
	{
		uint32_t flags;

		flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD |
		    MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
		flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
	}
#endif
2546 
2547 	/*
	 * mcp55 revisions A03 and above support 40-bit physical addressing.
	 * Enable DMA to take advantage of that.
2551 	 */
2552 	if (nvc->nvc_revid >= 0xa3) {
2553 		if (nv_sata_40bit_dma == B_TRUE) {
2554 			uint32_t reg32;
2555 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2556 			    "rev id is %X.  40-bit DMA addressing"
2557 			    " enabled", nvc->nvc_revid);
2558 			nvc->dma_40bit = B_TRUE;
2559 
2560 			reg32 = pci_config_get32(pci_conf_handle,
2561 			    NV_SATA_CFG_20);
2562 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2563 			    reg32 | NV_40BIT_PRD);
2564 
2565 			/*
2566 			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2567 			 * bits) for the primary PRD table, and bits 8-15
2568 			 * contain the top 8 bits for the secondary.  Set
2569 			 * to zero because the DMA attribute table for PRD
2570 			 * allocation forces it into 32 bit address space
2571 			 * anyway.
2572 			 */
2573 			reg32 = pci_config_get32(pci_conf_handle,
2574 			    NV_SATA_CFG_23);
2575 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2576 			    reg32 & 0xffff0000);
2577 		} else {
2578 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2579 			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
2580 		}
2581 	} else {
2582 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2583 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2584 	}
2585 }
2586 
2587 
2588 /*
2589  * Initialize register handling specific to ck804
2590  */
2591 static void
2592 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2593 {
2594 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2595 	uint32_t reg32;
2596 	uint16_t reg16;
2597 	nv_port_t *nvp;
2598 	int j;
2599 
2600 	/*
2601 	 * delay hotplug interrupts until PHYRDY.
2602 	 */
2603 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2604 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2605 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2606 
2607 	/*
2608 	 * enable hot plug interrupts for channel x and y
2609 	 */
2610 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2611 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2612 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2613 	    NV_HIRQ_EN | reg16);
2614 
2615 
2616 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2617 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2618 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2619 	    NV_HIRQ_EN | reg16);
2620 
2621 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2622 
2623 	/*
2624 	 * clear any existing interrupt pending then enable
2625 	 */
2626 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2627 		nvp = &(nvc->nvc_port[j]);
2628 		mutex_enter(&nvp->nvp_mutex);
2629 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2630 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2631 		mutex_exit(&nvp->nvp_mutex);
2632 	}
2633 }
2634 
2635 
2636 /*
2637  * Initialize the controller and set up driver data structures.
2638  * determine if ck804 or mcp5x class.
2639  */
2640 static int
2641 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2642 {
2643 	struct sata_hba_tran stran;
2644 	nv_port_t *nvp;
2645 	int j, ck804;
2646 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2647 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2648 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2649 	uint32_t reg32;
2650 	uint8_t reg8, reg8_save;
2651 
2652 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2653 
2654 	ck804 = B_TRUE;
2655 #ifdef SGPIO_SUPPORT
2656 	nvc->nvc_mcp5x_flag = B_FALSE;
2657 #endif
2658 
2659 	/*
2660 	 * Need to set bit 2 to 1 at config offset 0x50
2661 	 * to enable access to the bar5 registers.
2662 	 */
2663 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2664 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2665 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2666 		    reg32 | NV_BAR5_SPACE_EN);
2667 	}
2668 
2669 	/*
	 * Determine if this is ck804 or mcp5x.  ck804 maps the task file
	 * registers into bar5 while mcp5x doesn't.  Check one of the task
	 * file registers to see if it is writable and reads back what was
	 * written.  On mcp5x the unused offset will not read back the value
	 * written, whereas ck804 will return the value written.
2676 	 */
2677 	reg8_save = nv_get8(bar5_hdl,
2678 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2679 
2680 
2681 	for (j = 1; j < 3; j++) {
2682 
2683 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2684 		reg8 = nv_get8(bar5_hdl,
2685 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2686 
2687 		if (reg8 != j) {
2688 			ck804 = B_FALSE;
2689 			nvc->nvc_mcp5x_flag = B_TRUE;
2690 			break;
2691 		}
2692 	}
2693 
2694 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2695 
2696 	if (ck804 == B_TRUE) {
2697 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804", NULL);
2698 		nvc->nvc_interrupt = ck804_intr;
2699 		nvc->nvc_reg_init = ck804_reg_init;
2700 		nvc->nvc_set_intr = ck804_set_intr;
2701 	} else {
2702 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55", NULL);
2703 		nvc->nvc_interrupt = mcp5x_intr;
2704 		nvc->nvc_reg_init = mcp5x_reg_init;
2705 		nvc->nvc_set_intr = mcp5x_set_intr;
2706 	}
2707 
2708 
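	/*
	 * fill in the sata_hba_tran structure describing this HBA's
	 * capabilities and entry points for the sata module
	 */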
2709 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2710 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2711 	stran.sata_tran_hba_num_cports = NV_NUM_PORTS;
2712 	stran.sata_tran_hba_features_support =
2713 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2714 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2715 	stran.sata_tran_probe_port = nv_sata_probe;
2716 	stran.sata_tran_start = nv_sata_start;
2717 	stran.sata_tran_abort = nv_sata_abort;
2718 	stran.sata_tran_reset_dport = nv_sata_reset;
2719 	stran.sata_tran_selftest = NULL;
2720 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2721 	stran.sata_tran_pwrmgt_ops = NULL;
2722 	stran.sata_tran_ioctl = NULL;
2723 	nvc->nvc_sata_hba_tran = stran;
2724 
2725 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2726 	    KM_SLEEP);
2727 
2728 	/*
2729 	 * initialize registers common to all chipsets
2730 	 */
2731 	nv_common_reg_init(nvc);
2732 
2733 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2734 		nvp = &(nvc->nvc_port[j]);
2735 
2736 		cmd_addr = nvp->nvp_cmd_addr;
2737 		ctl_addr = nvp->nvp_ctl_addr;
2738 		bm_addr = nvp->nvp_bm_addr;
2739 
2740 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2741 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2742 
2743 		cv_init(&nvp->nvp_sync_cv, NULL, CV_DRIVER, NULL);
2744 		cv_init(&nvp->nvp_reset_cv, NULL, CV_DRIVER, NULL);
2745 
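		/*
		 * cache pointers to the task file registers within the
		 * command and control blocks for this port
		 */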
2746 		nvp->nvp_data	= cmd_addr + NV_DATA;
2747 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2748 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2749 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2750 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2751 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2752 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2753 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2754 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2755 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2756 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2757 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2758 
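		/*
		 * bus master (BM) register pointers for this port
		 */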
2759 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2760 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2761 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2762 
2763 		nvp->nvp_state = 0;
2764 
2765 		/*
2766 		 * Initialize dma handles, etc.
2767 		 * If it fails, the port is in inactive state.
2768 		 */
2769 		nv_init_port(nvp);
2770 	}
2771 
2772 	/*
2773 	 * initialize register by calling chip specific reg initialization
2774 	 */
2775 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2776 
2777 	/* initialize the hba dma attribute */
2778 	if (nvc->dma_40bit == B_TRUE)
2779 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2780 		    &buffer_dma_40bit_attr;
2781 	else
2782 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2783 		    &buffer_dma_attr;
2784 
2785 	return (NV_SUCCESS);
2786 }
2787 
2788 
2789 /*
2790  * Initialize data structures with enough slots to handle queuing, if
2791  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2792  * NCQ support is built into the driver and enabled.  It might have been
2793  * better to derive the true size from the drive itself, but the sata
2794  * module only sends down that information on the first NCQ command,
2795  * which means possibly re-sizing the structures on an interrupt stack,
 * making error handling messier.  The easy way is to just allocate
2797  * all 32 slots, which is what most drives support anyway.
2798  */
2799 static void
2800 nv_init_port(nv_port_t *nvp)
2801 {
2802 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2803 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2804 	dev_info_t *dip = nvc->nvc_dip;
2805 	ddi_device_acc_attr_t dev_attr;
2806 	size_t buf_size;
2807 	ddi_dma_cookie_t cookie;
2808 	uint_t count;
2809 	int rc, i;
2810 
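	/*
	 * device access attributes for the PRD (physical region descriptor)
	 * tables allocated below
	 */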
2811 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2812 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2813 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2814 
2815 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2816 	    NV_QUEUE_SLOTS, KM_SLEEP);
2817 
2818 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2819 	    NV_QUEUE_SLOTS, KM_SLEEP);
2820 
2821 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2822 	    NV_QUEUE_SLOTS, KM_SLEEP);
2823 
2824 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2825 	    NV_QUEUE_SLOTS, KM_SLEEP);
2826 
2827 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2828 	    KM_SLEEP);
2829 
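	/*
	 * allocate, map and bind a DMA-able PRD table for each slot
	 */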
2830 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2831 
2832 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2833 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2834 
2835 		if (rc != DDI_SUCCESS) {
2836 			nv_uninit_port(nvp);
2837 
2838 			return;
2839 		}
2840 
2841 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2842 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2843 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2844 		    &(nvp->nvp_sg_acc_hdl[i]));
2845 
2846 		if (rc != DDI_SUCCESS) {
2847 			nv_uninit_port(nvp);
2848 
2849 			return;
2850 		}
2851 
2852 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2853 		    nvp->nvp_sg_addr[i], buf_size,
2854 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2855 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2856 
2857 		if (rc != DDI_DMA_MAPPED) {
2858 			nv_uninit_port(nvp);
2859 
2860 			return;
2861 		}
2862 
2863 		ASSERT(count == 1);
2864 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2865 
2866 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2867 
2868 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2869 	}
2870 
2871 	/*
2872 	 * nvp_queue_depth represents the actual drive queue depth, not the
2873 	 * number of slots allocated in the structures (which may be more).
2874 	 * Actual queue depth is only learned after the first NCQ command, so
2875 	 * initialize it to 1 for now.
2876 	 */
2877 	nvp->nvp_queue_depth = 1;
2878 
2879 	/*
2880 	 * Port is initialized whether the device is attached or not.
2881 	 * Link processing and device identification will be started later,
2882 	 * after interrupts are initialized.
2883 	 */
2884 	nvp->nvp_type = SATA_DTYPE_NONE;
2885 }
2886 
2887 
2888 /*
2889  * Free dynamically allocated structures for port.
2890  */
2891 static void
2892 nv_uninit_port(nv_port_t *nvp)
2893 {
2894 	int i;
2895 
2896 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2897 	    "nv_uninit_port uninitializing", NULL);
2898 
2899 #ifdef SGPIO_SUPPORT
2900 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2901 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2902 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2903 	}
2904 #endif
2905 
2906 	nvp->nvp_type = SATA_DTYPE_NONE;
2907 
2908 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2909 		if (nvp->nvp_sg_paddr[i]) {
2910 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2911 		}
2912 
2913 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2914 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2915 		}
2916 
2917 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2918 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2919 		}
2920 	}
2921 
2922 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2923 	nvp->nvp_slot = NULL;
2924 
2925 	kmem_free(nvp->nvp_sg_dma_hdl,
2926 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2927 	nvp->nvp_sg_dma_hdl = NULL;
2928 
2929 	kmem_free(nvp->nvp_sg_acc_hdl,
2930 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2931 	nvp->nvp_sg_acc_hdl = NULL;
2932 
2933 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2934 	nvp->nvp_sg_addr = NULL;
2935 
2936 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2937 	nvp->nvp_sg_paddr = NULL;
2938 }
2939 
2940 
2941 /*
2942  * Cache register offsets and access handles to frequently accessed registers
2943  * which are common to either chipset.
2944  */
2945 static void
2946 nv_common_reg_init(nv_ctl_t *nvc)
2947 {
2948 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2949 	uchar_t *bm_addr_offset, *sreg_offset;
2950 	uint8_t bar, port;
2951 	nv_port_t *nvp;
2952 
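	/*
	 * Each port's command and control blocks are mapped through their
	 * own BARs, while the bus master and SATA status/control registers
	 * live at per-port offsets within shared BARs.
	 */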
2953 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2954 		if (port == 0) {
2955 			bar = NV_BAR_0;
2956 			bm_addr_offset = 0;
2957 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2958 		} else {
2959 			bar = NV_BAR_2;
2960 			bm_addr_offset = (uchar_t *)8;
2961 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2962 		}
2963 
2964 		nvp = &(nvc->nvc_port[port]);
2965 		nvp->nvp_ctlp = nvc;
2966 		nvp->nvp_port_num = port;
2967 		NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2968 
2969 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2970 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2971 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2972 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2973 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2974 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2975 		    (long)bm_addr_offset;
2976 
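		/*
		 * SStatus, SError, SActive and SControl register pointers
		 * within bar5
		 */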
2977 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2978 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2979 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2980 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2981 	}
2982 }
2983 
2984 
2985 static void
2986 nv_uninit_ctl(nv_ctl_t *nvc)
2987 {
2988 	int port;
2989 	nv_port_t *nvp;
2990 
2991 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2992 
2993 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2994 		nvp = &(nvc->nvc_port[port]);
2995 		mutex_enter(&nvp->nvp_mutex);
2996 		NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
2997 		nv_uninit_port(nvp);
2998 		mutex_exit(&nvp->nvp_mutex);
2999 		mutex_destroy(&nvp->nvp_mutex);
3000 		cv_destroy(&nvp->nvp_sync_cv);
3001 		cv_destroy(&nvp->nvp_reset_cv);
3002 	}
3003 
3004 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
3005 	nvc->nvc_port = NULL;
3006 }
3007 
3008 
3009 /*
3010  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
3011  * that interrupts from other devices can be disregarded while dtracing.
3012  */
3013 /* ARGSUSED */
3014 static uint_t
3015 ck804_intr(caddr_t arg1, caddr_t arg2)
3016 {
3017 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3018 	uint8_t intr_status;
3019 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3020 
3021 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3022 		return (DDI_INTR_UNCLAIMED);
3023 
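	/*
	 * read the shared interrupt status register to determine whether
	 * this controller generated the interrupt
	 */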
3024 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3025 
3026 	if (intr_status == 0) {
3027 
3028 		return (DDI_INTR_UNCLAIMED);
3029 	}
3030 
3031 	ck804_intr_process(nvc, intr_status);
3032 
3033 	return (DDI_INTR_CLAIMED);
3034 }
3035 
3036 
3037 /*
 * Main interrupt handler for ck804.  Handles normal device
 * interrupts as well as hot plug and remove interrupts.
3041  */
3042 static void
3043 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
3044 {
3045 
3046 	int port, i;
3047 	nv_port_t *nvp;
3048 	nv_slot_t *nv_slotp;
3049 	uchar_t	status;
3050 	sata_pkt_t *spkt;
3051 	uint8_t bmstatus, clear_bits;
3052 	ddi_acc_handle_t bmhdl;
3053 	int nvcleared = 0;
3054 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3055 	uint32_t sstatus;
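	/*
	 * per-port hot plug and power management interrupt status bits,
	 * indexed by port number
	 */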
3056 	int port_mask_hot[] = {
3057 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3058 	};
3059 	int port_mask_pm[] = {
3060 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3061 	};
3062 
3063 	NVLOG(NVDBG_INTR, nvc, NULL,
3064 	    "ck804_intr_process entered intr_status=%x", intr_status);
3065 
3066 	/*
	 * For command completion interrupts, an explicit clear is not
	 * required.  However, for the error cases an explicit clear is
	 * performed.
3069 	 */
3070 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3071 
3072 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3073 
3074 		if ((port_mask[port] & intr_status) == 0) {
3075 
3076 			continue;
3077 		}
3078 
3079 		NVLOG(NVDBG_INTR, nvc, NULL,
3080 		    "ck804_intr_process interrupt on port %d", port);
3081 
3082 		nvp = &(nvc->nvc_port[port]);
3083 
3084 		mutex_enter(&nvp->nvp_mutex);
3085 
3086 		/*
3087 		 * this case might be encountered when the other port
3088 		 * is active
3089 		 */
3090 		if (nvp->nvp_state & NV_DEACTIVATED) {
3091 
3092 			/*
3093 			 * clear interrupt bits
3094 			 */
3095 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3096 			    port_mask[port]);
3097 
3098 			mutex_exit(&nvp->nvp_mutex);
3099 
3100 			continue;
3101 		}
3102 
3103 
3104 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3105 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3106 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3107 			    " no command in progress status=%x", status);
3108 			mutex_exit(&nvp->nvp_mutex);
3109 
3110 			/*
3111 			 * clear interrupt bits
3112 			 */
3113 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3114 			    port_mask[port]);
3115 
3116 			continue;
3117 		}
3118 
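		/*
		 * check the bus master status to see whether this port
		 * actually asserted the interrupt
		 */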
3119 		bmhdl = nvp->nvp_bm_hdl;
3120 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3121 
3122 		if (!(bmstatus & BMISX_IDEINTS)) {
3123 			mutex_exit(&nvp->nvp_mutex);
3124 
3125 			continue;
3126 		}
3127 
3128 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3129 
3130 		if (status & SATA_STATUS_BSY) {
3131 			mutex_exit(&nvp->nvp_mutex);
3132 
3133 			continue;
3134 		}
3135 
3136 		nv_slotp = &(nvp->nvp_slot[0]);
3137 
3138 		ASSERT(nv_slotp);
3139 
3140 		spkt = nv_slotp->nvslot_spkt;
3141 
3142 		if (spkt == NULL) {
3143 			mutex_exit(&nvp->nvp_mutex);
3144 
3145 			continue;
3146 		}
3147 
3148 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3149 
3150 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3151 
3152 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3153 
3154 			nv_complete_io(nvp, spkt, 0);
3155 		}
3156 
3157 		mutex_exit(&nvp->nvp_mutex);
3158 	}
3159 
3160 	/*
3161 	 * ck804 often doesn't correctly distinguish hot add/remove
3162 	 * interrupts.  Frequently both the ADD and the REMOVE bits
3163 	 * are asserted, whether it was a remove or add.  Use sstatus
3164 	 * to distinguish hot add from hot remove.
3165 	 */
3166 
3167 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3168 		clear_bits = 0;
3169 
3170 		nvp = &(nvc->nvc_port[port]);
3171 		mutex_enter(&nvp->nvp_mutex);
3172 
3173 		if ((port_mask_pm[port] & intr_status) != 0) {
3174 			clear_bits = port_mask_pm[port];
3175 			NVLOG(NVDBG_HOT, nvc, nvp,
3176 			    "clearing PM interrupt bit: %x",
3177 			    intr_status & port_mask_pm[port]);
3178 		}
3179 
3180 		if ((port_mask_hot[port] & intr_status) == 0) {
3181 			if (clear_bits != 0) {
3182 				goto clear;
3183 			} else {
3184 				mutex_exit(&nvp->nvp_mutex);
3185 				continue;
3186 			}
3187 		}
3188 
3189 		/*
3190 		 * reaching here means there was a hot add or remove.
3191 		 */
3192 		clear_bits |= port_mask_hot[port];
3193 
3194 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3195 
3196 		sstatus = nv_get32(bar5_hdl,
3197 		    nvc->nvc_port[port].nvp_sstatus);
3198 
3199 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3200 		    SSTATUS_DET_DEVPRE_PHYCOM) {
3201 			nv_link_event(nvp, NV_REM_DEV);
3202 		} else {
3203 			nv_link_event(nvp, NV_ADD_DEV);
3204 		}
3205 	clear:
3206 		/*
3207 		 * clear interrupt bits.  explicit interrupt clear is
3208 		 * required for hotplug interrupts.
3209 		 */
3210 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3211 
3212 		/*
		 * make sure it's flushed and cleared.  If not, try
		 * again.  Sometimes it has been observed not to clear
3215 		 * on the first try.
3216 		 */
3217 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3218 
3219 		/*
3220 		 * make 10 additional attempts to clear the interrupt
3221 		 */
3222 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3223 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3224 			    "still not clear try=%d", intr_status,
3225 			    ++nvcleared);
3226 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3227 			    clear_bits);
3228 			intr_status = nv_get8(bar5_hdl,
3229 			    nvc->nvc_ck804_int_status);
3230 		}
3231 
3232 		/*
3233 		 * if still not clear, log a message and disable the
3234 		 * port. highly unlikely that this path is taken, but it
3235 		 * gives protection against a wedged interrupt.
3236 		 */
3237 		if (intr_status & clear_bits) {
3238 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3239 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3240 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3241 			nvp->nvp_state |= NV_FAILED;
3242 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3243 			    B_TRUE);
3244 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3245 			    "interrupt.  disabling port intr_status=%X",
3246 			    intr_status);
3247 		}
3248 
3249 		mutex_exit(&nvp->nvp_mutex);
3250 	}
3251 }
3252 
3253 
3254 /*
3255  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3256  * on the controller, to handle completion and hot plug and remove events.
3257  */
3258 static uint_t
3259 mcp5x_intr_port(nv_port_t *nvp)
3260 {
3261 	nv_ctl_t *nvc = nvp->nvp_ctlp;
3262 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3263 	uint8_t clear = 0, intr_cycles = 0;
3264 	int ret = DDI_INTR_UNCLAIMED;
3265 	uint16_t int_status;
3266 	clock_t intr_time;
3267 	int loop_cnt = 0;
3268 
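	/*
	 * record the entry time so the time spent in this routine can be
	 * tracked for debugging (see the end of this function)
	 */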
3269 	nvp->intr_start_time = ddi_get_lbolt();
3270 
3271 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);
3272 
3273 	do {
3274 		/*
3275 		 * read current interrupt status
3276 		 */
3277 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3278 
3279 		/*
3280 		 * if the port is deactivated, just clear the interrupt and
3281 		 * return.  can get here even if interrupts were disabled
3282 		 * on this port but enabled on the other.
3283 		 */
3284 		if (nvp->nvp_state & NV_DEACTIVATED) {
3285 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3286 			    int_status);
3287 
3288 			return (DDI_INTR_CLAIMED);
3289 		}
3290 
3291 		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);
3292 
3293 		DTRACE_PROBE1(int_status_before_h, int, int_status);
3294 
3295 		/*
3296 		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3297 		 * but are masked out from causing an interrupt to be generated
3298 		 * to the processor.  Ignore them here by masking them out.
3299 		 */
3300 		int_status &= ~(MCP5X_INT_IGNORE);
3301 
3302 		DTRACE_PROBE1(int_status_after_h, int, int_status);
3303 
3304 		/*
3305 		 * exit the loop when no more interrupts to process
3306 		 */
3307 		if (int_status == 0) {
3308 
3309 			break;
3310 		}
3311 
3312 		if (int_status & MCP5X_INT_COMPLETE) {
3313 			NVLOG(NVDBG_INTR, nvc, nvp,
3314 			    "mcp5x_packet_complete_intr", NULL);
3315 			/*
3316 			 * since int_status was set, return DDI_INTR_CLAIMED
3317 			 * from the DDI's perspective even though the packet
3318 			 * completion may not have succeeded.  If it fails,
3319 			 * need to manually clear the interrupt, otherwise
3320 			 * clearing is implicit as a result of reading the
3321 			 * task file status register.
3322 			 */
3323 			ret = DDI_INTR_CLAIMED;
3324 			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3325 			    NV_FAILURE) {
3326 				clear |= MCP5X_INT_COMPLETE;
3327 			} else {
3328 				intr_cycles = 0;
3329 			}
3330 		}
3331 
3332 		if (int_status & MCP5X_INT_DMA_SETUP) {
3333 			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
3334 			    NULL);
3335 
3336 			/*
3337 			 * Needs to be cleared before starting the BM, so do it
3338 			 * now.  make sure this is still working.
3339 			 */
3340 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3341 			    MCP5X_INT_DMA_SETUP);
3342 #ifdef NCQ
3343 			ret = mcp5x_dma_setup_intr(nvc, nvp);
3344 #endif
3345 		}
3346 
3347 		if (int_status & MCP5X_INT_REM) {
3348 			clear |= MCP5X_INT_REM;
3349 			ret = DDI_INTR_CLAIMED;
3350 
3351 			mutex_enter(&nvp->nvp_mutex);
3352 			nv_link_event(nvp, NV_REM_DEV);
3353 			mutex_exit(&nvp->nvp_mutex);
3354 
3355 		} else if (int_status & MCP5X_INT_ADD) {
3356 			clear |= MCP5X_INT_ADD;
3357 			ret = DDI_INTR_CLAIMED;
3358 
3359 			mutex_enter(&nvp->nvp_mutex);
3360 			nv_link_event(nvp, NV_ADD_DEV);
3361 			mutex_exit(&nvp->nvp_mutex);
3362 		}
3363 		if (clear) {
3364 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3365 			clear = 0;
3366 		}
3367 
3368 		/*
3369 		 * protect against a stuck interrupt
3370 		 */
3371 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3372 
3373 			NVLOG(NVDBG_INTR, nvc, nvp, "excessive interrupt "
3374 			    "processing.  Disabling interrupts int_status=%X"
3375 			    " clear=%X", int_status, clear);
3376 			DTRACE_PROBE(excessive_interrupts_f);
3377 
3378 			mutex_enter(&nvp->nvp_mutex);
3379 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3380 			/*
3381 			 * reset the device.  If it remains inaccessible
3382 			 * after a reset it will be failed then.
3383 			 */
3384 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3385 			    B_TRUE);
3386 			mutex_exit(&nvp->nvp_mutex);
3387 		}
3388 
3389 	} while (loop_cnt++ < nv_max_intr_loops);
3390 
3391 	if (loop_cnt > nvp->intr_loop_cnt) {
3392 		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
3393 		    "Exiting with multiple intr loop count %d", loop_cnt);
3394 		nvp->intr_loop_cnt = loop_cnt;
3395 	}
3396 
3397 	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3398 	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3399 		uint8_t status, bmstatus;
3400 		uint16_t int_status2;
3401 
3402 		if (int_status & MCP5X_INT_COMPLETE) {
3403 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3404 			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3405 			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3406 			    nvp->nvp_mcp5x_int_status);
3407 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3408 			    "mcp55_intr_port: Exiting with altstatus %x, "
3409 			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3410 			    " loop_cnt %d ", status, bmstatus, int_status2,
3411 			    int_status, ret, loop_cnt);
3412 		}
3413 	}
3414 
3415 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);
3416 
3417 	/*
3418 	 * To facilitate debugging, keep track of the length of time spent in
3419 	 * the port interrupt routine.
3420 	 */
3421 	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3422 	if (intr_time > nvp->intr_duration)
3423 		nvp->intr_duration = intr_time;
3424 
3425 	return (ret);
3426 }
3427 
3428 
3429 /* ARGSUSED */
3430 static uint_t
3431 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3432 {
3433 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3434 	int ret;
3435 
3436 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3437 		return (DDI_INTR_UNCLAIMED);
3438 
3439 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3440 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3441 
3442 	return (ret);
3443 }
3444 
3445 
3446 #ifdef NCQ
3447 /*
3448  * with software driven NCQ on mcp5x, an interrupt occurs right
3449  * before the drive is ready to do a DMA transfer.  At this point,
3450  * the PRD table needs to be programmed and the DMA engine enabled
3451  * and ready to go.
3452  *
3453  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3454  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3455  * -- clear bit 0 of master command reg
3456  * -- program PRD
3457  * -- clear the interrupt status bit for the DMA Setup FIS
3458  * -- set bit 0 of the bus master command register
3459  */
3460 static int
3461 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3462 {
3463 	int slot;
3464 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3465 	uint8_t bmicx;
3466 	int port = nvp->nvp_port_num;
3467 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3468 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3469 
3470 	nv_cmn_err(CE_PANIC, nvc, nvp,
3471 	    "this is should not be executed at all until NCQ");
3472 
3473 	mutex_enter(&nvp->nvp_mutex);
3474 
3475 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3476 
3477 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3478 
3479 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3480 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);
3481 
3482 	/*
3483 	 * halt the DMA engine.  This step is necessary according to
3484 	 * the mcp5x spec, probably since there may have been a "first" packet
3485 	 * that already programmed the DMA engine, but may not turn out to
3486 	 * be the first one processed.
3487 	 */
3488 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3489 
3490 	if (bmicx & BMICX_SSBM) {
3491 		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3492 		    "another packet.  Cancelling and reprogramming", NULL);
3493 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3494 	}
3495 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3496 
3497 	nv_start_dma_engine(nvp, slot);
3498 
3499 	mutex_exit(&nvp->nvp_mutex);
3500 
3501 	return (DDI_INTR_CLAIMED);
3502 }
3503 #endif /* NCQ */
3504 
3505 
3506 /*
3507  * packet completion interrupt.  If the packet is complete, invoke
3508  * the packet completion callback.
3509  */
3510 static int
3511 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3512 {
3513 	uint8_t status, bmstatus;
3514 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3515 	int sactive;
3516 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3517 	sata_pkt_t *spkt;
3518 	nv_slot_t *nv_slotp;
3519 
3520 	mutex_enter(&nvp->nvp_mutex);
3521 
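	/*
	 * read the bus master status; if neither the interrupt nor the
	 * error bit is set, this was not a command completion on this port
	 */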
3522 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3523 
3524 	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3525 		DTRACE_PROBE1(bmstatus_h, int, bmstatus);
3526 		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set %x",
3527 		    bmstatus);
3528 		mutex_exit(&nvp->nvp_mutex);
3529 
3530 		return (NV_FAILURE);
3531 	}
3532 
3533 	/*
3534 	 * Commands may have been processed by abort or timeout before
3535 	 * interrupt processing acquired the mutex. So we may be processing
3536 	 * an interrupt for packets that were already removed.
3537 	 * For functioning NCQ processing all slots may be checked, but
3538 	 * with NCQ disabled (current code), relying on *_run flags is OK.
3539 	 */
3540 	if (nvp->nvp_non_ncq_run) {
3541 		/*
3542 		 * If the just completed item is a non-ncq command, the busy
3543 		 * bit should not be set
3544 		 */
3545 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3546 		if (status & SATA_STATUS_BSY) {
3547 			nv_cmn_err(CE_WARN, nvc, nvp,
3548 			    "unexpected SATA_STATUS_BSY set");
3549 			DTRACE_PROBE(unexpected_status_bsy_p);
3550 			mutex_exit(&nvp->nvp_mutex);
3551 			/*
3552 			 * calling function will clear interrupt.  then
3553 			 * the real interrupt will either arrive or the
3554 			 * packet timeout handling will take over and
3555 			 * reset.
3556 			 */
3557 			return (NV_FAILURE);
3558 		}
3559 		ASSERT(nvp->nvp_ncq_run == 0);
3560 	} else {
3561 		ASSERT(nvp->nvp_non_ncq_run == 0);
3562 		/*
3563 		 * Pre-NCQ code!
3564 		 * Nothing to do. The packet for the command that just
3565 		 * completed is already gone. Just clear the interrupt.
3566 		 */
3567 		(void) nv_bm_status_clear(nvp);
3568 		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3569 		mutex_exit(&nvp->nvp_mutex);
3570 		return (NV_SUCCESS);
3571 
3572 		/*
		 * NCQ: check for BSY here and wait if still busy before
		 * continuing.  Rather than waiting for it to clear when
		 * starting a packet and wasting CPU time, the starting
		 * thread can exit immediately, but might have to spin here
		 * for a bit.  Needs more work and experimentation.
3579 		 */
3580 	}
3581 
3582 	/*
3583 	 * active_pkt_bit will represent the bitmap of the single completed
3584 	 * packet.  Because of the nature of sw assisted NCQ, only one
3585 	 * command will complete per interrupt.
3586 	 */
3587 
3588 	if (ncq_command == B_FALSE) {
3589 		active_pkt = 0;
3590 	} else {
3591 		/*
3592 		 * NCQ: determine which command just completed, by examining
3593 		 * which bit cleared in the register since last written.
3594 		 */
3595 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3596 
3597 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3598 
3599 		ASSERT(active_pkt_bit);
3600 
3601 
3602 		/*
3603 		 * this failure path needs more work to handle the
3604 		 * error condition and recovery.
3605 		 */
3606 		if (active_pkt_bit == 0) {
3607 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3608 
3609 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3610 			    "nvp->nvp_sactive %X", sactive,
3611 			    nvp->nvp_sactive_cache);
3612 
3613 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3614 
3615 			mutex_exit(&nvp->nvp_mutex);
3616 
3617 			return (NV_FAILURE);
3618 		}
3619 
3620 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3621 		    active_pkt++, active_pkt_bit >>= 1) {
3622 		}
3623 
3624 		/*
3625 		 * make sure only one bit is ever turned on
3626 		 */
3627 		ASSERT(active_pkt_bit == 1);
3628 
3629 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3630 	}
3631 
3632 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3633 
3634 	spkt = nv_slotp->nvslot_spkt;
3635 
3636 	ASSERT(spkt != NULL);
3637 
3638 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3639 
3640 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3641 
3642 	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3643 
3644 		nv_complete_io(nvp, spkt, active_pkt);
3645 	}
3646 
3647 	mutex_exit(&nvp->nvp_mutex);
3648 
3649 	return (NV_SUCCESS);
3650 }
3651 
3652 
3653 static void
3654 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3655 {
3656 
3657 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3658 
3659 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3660 		nvp->nvp_ncq_run--;
3661 	} else {
3662 		nvp->nvp_non_ncq_run--;
3663 	}
3664 
3665 	/*
	 * Mark the packet slot idle before calling satapkt_comp so the
	 * slot can be reused.
3668 	 */
3669 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3670 
3671 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3672 		/*
		 * If this is not a timed polled mode cmd, which has an
		 * active thread monitoring for completion, then signal
		 * the sleeping thread that the cmd is complete.
3676 		 */
3677 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3678 			cv_signal(&nvp->nvp_sync_cv);
3679 		}
3680 
3681 		return;
3682 	}
3683 
3684 	if (spkt->satapkt_comp != NULL) {
3685 		mutex_exit(&nvp->nvp_mutex);
3686 		(*spkt->satapkt_comp)(spkt);
3687 		mutex_enter(&nvp->nvp_mutex);
3688 	}
3689 }
3690 
3691 
3692 /*
 * Check whether the packet is an NCQ command or not.  For an NCQ
 * command, start it if there is still room on the queue.  For a
 * non-NCQ command, only start it if no other command is running.
3696  */
3697 static int
3698 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3699 {
3700 	uint8_t cmd, ncq;
3701 
3702 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3703 
3704 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3705 
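	/*
	 * determine whether this is a first party DMA queued (NCQ) command
	 */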
3706 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3707 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3708 
3709 	if (ncq == B_FALSE) {
3710 
3711 		if ((nvp->nvp_non_ncq_run == 1) ||
3712 		    (nvp->nvp_ncq_run > 0)) {
3713 			/*
3714 			 * next command is non-ncq which can't run
3715 			 * concurrently.  exit and return queue full.
3716 			 */
3717 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3718 
3719 			return (SATA_TRAN_QUEUE_FULL);
3720 		}
3721 
3722 		return (nv_start_common(nvp, spkt));
3723 	}
3724 
3725 	/*
3726 	 * ncq == B_TRUE
3727 	 */
3728 	if (nvp->nvp_non_ncq_run == 1) {
3729 		/*
3730 		 * cannot start any NCQ commands when there
3731 		 * is a non-NCQ command running.
3732 		 */
3733 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3734 
3735 		return (SATA_TRAN_QUEUE_FULL);
3736 	}
3737 
3738 #ifdef NCQ
3739 	/*
3740 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3741 	 * is being pulled out until NCQ support is later addressed
3742 	 *
3743 	 * nvp_queue_depth is initialized by the first NCQ command
3744 	 * received.
3745 	 */
3746 	if (nvp->nvp_queue_depth == 1) {
3747 		nvp->nvp_queue_depth =
3748 		    spkt->satapkt_device.satadev_qdepth;
3749 
3750 		ASSERT(nvp->nvp_queue_depth > 1);
3751 
		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
		    "nv_start_async: nvp_queue_depth set to %d",
		    nvp->nvp_queue_depth);
3755 	}
3756 #endif
3757 
3758 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3759 		/*
3760 		 * max number of NCQ commands already active
3761 		 */
3762 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3763 
3764 		return (SATA_TRAN_QUEUE_FULL);
3765 	}
3766 
3767 	return (nv_start_common(nvp, spkt));
3768 }
3769 
3770 
3771 /*
3772  * configure INTx and legacy interrupts
3773  */
3774 static int
3775 nv_add_legacy_intrs(nv_ctl_t *nvc)
3776 {
3777 	dev_info_t	*devinfo = nvc->nvc_dip;
3778 	int		actual, count = 0;
3779 	int		x, y, rc, inum = 0;
3780 
3781 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3782 
3783 	/*
3784 	 * get number of interrupts
3785 	 */
3786 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3787 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3788 		NVLOG(NVDBG_INIT, nvc, NULL,
3789 		    "ddi_intr_get_nintrs() failed, "
3790 		    "rc %d count %d", rc, count);
3791 
3792 		return (DDI_FAILURE);
3793 	}
3794 
3795 	/*
3796 	 * allocate an array of interrupt handles
3797 	 */
3798 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3799 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3800 
3801 	/*
3802 	 * call ddi_intr_alloc()
3803 	 */
3804 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3805 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3806 
3807 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3808 		nv_cmn_err(CE_WARN, nvc, NULL,
3809 		    "ddi_intr_alloc() failed, rc %d", rc);
3810 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3811 
3812 		return (DDI_FAILURE);
3813 	}
3814 
3815 	if (actual < count) {
3816 		nv_cmn_err(CE_WARN, nvc, NULL,
3817 		    "ddi_intr_alloc: requested: %d, received: %d",
3818 		    count, actual);
3819 
3820 		goto failure;
3821 	}
3822 
3823 	nvc->nvc_intr_cnt = actual;
3824 
3825 	/*
3826 	 * get intr priority
3827 	 */
3828 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3829 	    DDI_SUCCESS) {
3830 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3831 
3832 		goto failure;
3833 	}
3834 
3835 	/*
3836 	 * Test for high level mutex
3837 	 */
3838 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3839 		nv_cmn_err(CE_WARN, nvc, NULL,
3840 		    "nv_add_legacy_intrs: high level intr not supported");
3841 
3842 		goto failure;
3843 	}
3844 
3845 	for (x = 0; x < actual; x++) {
3846 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3847 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3848 			nv_cmn_err(CE_WARN, nvc, NULL,
3849 			    "ddi_intr_add_handler() failed");
3850 
3851 			goto failure;
3852 		}
3853 	}
3854 
3855 	/*
3856 	 * call ddi_intr_enable() for legacy interrupts
3857 	 */
3858 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3859 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3860 	}
3861 
3862 	return (DDI_SUCCESS);
3863 
3864 	failure:
3865 	/*
3866 	 * free allocated intr and nvc_htable
3867 	 */
3868 	for (y = 0; y < actual; y++) {
3869 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3870 	}
3871 
3872 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3873 
3874 	return (DDI_FAILURE);
3875 }
3876 
3877 #ifdef	NV_MSI_SUPPORTED
3878 /*
3879  * configure MSI interrupts
3880  */
3881 static int
3882 nv_add_msi_intrs(nv_ctl_t *nvc)
3883 {
3884 	dev_info_t	*devinfo = nvc->nvc_dip;
3885 	int		count, avail, actual;
3886 	int		x, y, rc, inum = 0;
3887 
3888 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);
3889 
3890 	/*
3891 	 * get number of interrupts
3892 	 */
3893 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3894 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3895 		nv_cmn_err(CE_WARN, nvc, NULL,
3896 		    "ddi_intr_get_nintrs() failed, "
3897 		    "rc %d count %d", rc, count);
3898 
3899 		return (DDI_FAILURE);
3900 	}
3901 
3902 	/*
3903 	 * get number of available interrupts
3904 	 */
3905 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3906 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3907 		nv_cmn_err(CE_WARN, nvc, NULL,
3908 		    "ddi_intr_get_navail() failed, "
3909 		    "rc %d avail %d", rc, avail);
3910 
3911 		return (DDI_FAILURE);
3912 	}
3913 
3914 	if (avail < count) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
		    avail, count);
3918 	}
3919 
3920 	/*
3921 	 * allocate an array of interrupt handles
3922 	 */
3923 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3924 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3925 
3926 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3927 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3928 
3929 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3930 		nv_cmn_err(CE_WARN, nvc, NULL,
3931 		    "ddi_intr_alloc() failed, rc %d", rc);
3932 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3933 
3934 		return (DDI_FAILURE);
3935 	}
3936 
3937 	/*
3938 	 * Use interrupt count returned or abort?
3939 	 */
3940 	if (actual < count) {
3941 		NVLOG(NVDBG_INIT, nvc, NULL,
3942 		    "Requested: %d, Received: %d", count, actual);
3943 	}
3944 
3945 	nvc->nvc_intr_cnt = actual;
3946 
3947 	/*
3948 	 * get priority for first msi, assume remaining are all the same
3949 	 */
3950 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3951 	    DDI_SUCCESS) {
3952 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3953 
3954 		goto failure;
3955 	}
3956 
3957 	/*
3958 	 * test for high level mutex
3959 	 */
3960 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3961 		nv_cmn_err(CE_WARN, nvc, NULL,
3962 		    "nv_add_msi_intrs: high level intr not supported");
3963 
3964 		goto failure;
3965 	}
3966 
3967 	/*
3968 	 * Call ddi_intr_add_handler()
3969 	 */
3970 	for (x = 0; x < actual; x++) {
3971 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3972 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3973 			nv_cmn_err(CE_WARN, nvc, NULL,
3974 			    "ddi_intr_add_handler() failed");
3975 
3976 			goto failure;
3977 		}
3978 	}
3979 
3980 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3981 
3982 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3983 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3984 		    nvc->nvc_intr_cnt);
3985 	} else {
3986 		/*
3987 		 * Call ddi_intr_enable() for MSI non block enable
3988 		 */
3989 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3990 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3991 		}
3992 	}
3993 
3994 	return (DDI_SUCCESS);
3995 
3996 	failure:
3997 	/*
3998 	 * free allocated intr and nvc_htable
3999 	 */
4000 	for (y = 0; y < actual; y++) {
4001 		(void) ddi_intr_free(nvc->nvc_htable[y]);
4002 	}
4003 
4004 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4005 
4006 	return (DDI_FAILURE);
4007 }
4008 #endif
4009 
4010 
4011 static void
4012 nv_rem_intrs(nv_ctl_t *nvc)
4013 {
4014 	int x, i;
4015 	nv_port_t *nvp;
4016 
4017 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
4018 
4019 	/*
4020 	 * prevent controller from generating interrupts by
4021 	 * masking them out.  This is an extra precaution.
4022 	 */
4023 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
4024 		nvp = (&nvc->nvc_port[i]);
4025 		mutex_enter(&nvp->nvp_mutex);
4026 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
4027 		mutex_exit(&nvp->nvp_mutex);
4028 	}
4029 
4030 	/*
4031 	 * disable all interrupts
4032 	 */
4033 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4034 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4035 		(void) ddi_intr_block_disable(nvc->nvc_htable,
4036 		    nvc->nvc_intr_cnt);
4037 	} else {
4038 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4039 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
4040 		}
4041 	}
4042 
4043 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4044 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4045 		(void) ddi_intr_free(nvc->nvc_htable[x]);
4046 	}
4047 
4048 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4049 }
4050 
4051 
4052 /*
4053  * variable argument wrapper for cmn_err.  prefixes the instance and port
4054  * number if possible
4055  */
4056 static void
4057 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, va_list ap,
4058 	boolean_t log_to_sata_ring)
4059 {
4060 	char port[NV_STR_LEN];
4061 	char inst[NV_STR_LEN];
4062 	dev_info_t *dip;
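	/*
	 * dip is only assigned when nvc or nvp is non-NULL; callers that
	 * log to the sata trace ring are assumed to supply at least one
	 * of them.
	 */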
4063 
4064 	if (nvc) {
4065 		(void) snprintf(inst, NV_STR_LEN, "inst%d ",
4066 		    ddi_get_instance(nvc->nvc_dip));
4067 		dip = nvc->nvc_dip;
4068 	} else {
4069 		inst[0] = '\0';
4070 	}
4071 
4072 	if (nvp) {
4073 		(void) snprintf(port, NV_STR_LEN, "port%d",
4074 		    nvp->nvp_port_num);
4075 		dip = nvp->nvp_ctlp->nvc_dip;
4076 	} else {
4077 		port[0] = '\0';
4078 	}
4079 
4080 	mutex_enter(&nv_log_mutex);
4081 
4082 	(void) sprintf(nv_log_buf, "%s%s%s", inst, port,
4083 	    (inst[0]|port[0] ? ": " :""));
4084 
4085 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4086 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4087 
4088 	/*
4089 	 * Log to console or log to file, depending on
4090 	 * nv_log_to_console setting.
4091 	 */
4092 	if (nv_log_to_console) {
4093 		if (nv_prom_print) {
4094 			prom_printf("%s\n", nv_log_buf);
4095 		} else {
4096 			cmn_err(ce, "%s\n", nv_log_buf);
4097 		}
4098 	} else {
4099 		cmn_err(ce, "!%s", nv_log_buf);
4100 	}
4101 
4102 	if (log_to_sata_ring == B_TRUE) {
4103 		(void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4104 
4105 		(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4106 		    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4107 
4108 		sata_trace_debug(dip, nv_log_buf);
4109 	}
4110 
4111 	mutex_exit(&nv_log_mutex);
4112 }
4113 
4114 
4115 /*
4116  * wrapper for cmn_err
4117  */
4118 static void
4119 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4120 {
4121 	va_list ap;
4122 
4123 	va_start(ap, fmt);
4124 	nv_vcmn_err(ce, nvc, nvp, fmt, ap, B_TRUE);
4125 	va_end(ap);
4126 }
4127 
4128 
4129 static void
4130 nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
4131 {
4132 	va_list ap;
4133 
4134 	if (nv_log_to_cmn_err == B_TRUE) {
4135 		va_start(ap, fmt);
4136 		nv_vcmn_err(CE_CONT, nvc, nvp, fmt, ap, B_FALSE);
4137 		va_end(ap);
4138 
4139 	}
4140 
4141 	va_start(ap, fmt);
4142 
4143 	if (nvp == NULL && nvc == NULL) {
4144 		sata_vtrace_debug(NULL, fmt, ap);
4145 		va_end(ap);
4146 
4147 		return;
4148 	}
4149 
4150 	if (nvp == NULL && nvc != NULL) {
4151 		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
4152 		va_end(ap);
4153 
4154 		return;
4155 	}
4156 
4157 	/*
4158 	 * nvp is not NULL, but nvc might be.  Reference nvp for both
4159 	 * port and dip, to get the port number prefixed on the
4160 	 * message.
4161 	 */
4162 	mutex_enter(&nv_log_mutex);
4163 
4164 	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
4165 	    nvp->nvp_port_num, fmt);
4166 
4167 	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);
4168 
4169 	mutex_exit(&nv_log_mutex);
4170 
4171 	va_end(ap);
4172 }
4173 
4174 
4175 /*
4176  * program registers which are common to all commands
4177  */
4178 static void
4179 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4180 {
4181 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4182 	sata_pkt_t *spkt;
4183 	sata_cmd_t *satacmd;
4184 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4185 	uint8_t cmd, ncq = B_FALSE;
4186 
4187 	spkt = nv_slotp->nvslot_spkt;
4188 	satacmd = &spkt->satapkt_cmd;
4189 	cmd = satacmd->satacmd_cmd_reg;
4190 
4191 	ASSERT(nvp->nvp_slot);
4192 
4193 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4194 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4195 		ncq = B_TRUE;
4196 	}
4197 
4198 	/*
4199 	 * select the drive
4200 	 */
4201 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4202 
4203 	/*
4204 	 * make certain the drive selected
4205 	 */
4206 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4207 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4208 
4209 		return;
4210 	}
4211 
4212 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4213 
4214 	case ATA_ADDR_LBA:
4215 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4216 		    NULL);
4217 
4218 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4219 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4220 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4221 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4222 		nv_put8(cmdhdl, nvp->nvp_feature,
4223 		    satacmd->satacmd_features_reg);
4224 
4225 
4226 		break;
4227 
4228 	case ATA_ADDR_LBA28:
4229 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4230 		    "ATA_ADDR_LBA28 mode", NULL);
4231 		/*
4232 		 * NCQ only uses 48-bit addressing
4233 		 */
4234 		ASSERT(ncq != B_TRUE);
4235 
4236 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4237 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4238 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4239 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4240 		nv_put8(cmdhdl, nvp->nvp_feature,
4241 		    satacmd->satacmd_features_reg);
4242 
4243 		break;
4244 
4245 	case ATA_ADDR_LBA48:
4246 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4247 		    "ATA_ADDR_LBA48 mode", NULL);
4248 
4249 		/*
4250 		 * for NCQ, tag goes into count register and real sector count
4251 		 * into features register.  The sata module does the translation
4252 		 * in the satacmd.
4253 		 */
4254 		if (ncq == B_TRUE) {
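			/*
			 * The NCQ tag occupies bits 7:3 of the sector
			 * count register, hence the shift by 3.
			 */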
4255 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4256 		} else {
4257 			nv_put8(cmdhdl, nvp->nvp_count,
4258 			    satacmd->satacmd_sec_count_msb);
4259 			nv_put8(cmdhdl, nvp->nvp_count,
4260 			    satacmd->satacmd_sec_count_lsb);
4261 		}
4262 
4263 		nv_put8(cmdhdl, nvp->nvp_feature,
4264 		    satacmd->satacmd_features_reg_ext);
4265 		nv_put8(cmdhdl, nvp->nvp_feature,
4266 		    satacmd->satacmd_features_reg);
4267 
4268 		/*
4269 		 * send the high-order half first
4270 		 */
4271 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4272 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4273 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4274 
4275 		/*
4276 		 * Send the low-order half
4277 		 */
4278 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4279 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4280 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4281 
4282 		break;
4283 
4284 	case 0:
4285 		/*
4286 		 * non-media access commands such as identify and features
4287 		 * take this path.
4288 		 */
4289 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4290 		nv_put8(cmdhdl, nvp->nvp_feature,
4291 		    satacmd->satacmd_features_reg);
4292 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4293 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4294 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4295 
4296 		break;
4297 
4298 	default:
4299 		break;
4300 	}
4301 
4302 	ASSERT(nvp->nvp_slot);
4303 }
4304 
4305 
4306 /*
4307  * start a command that involves no media access
4308  */
4309 static int
4310 nv_start_nodata(nv_port_t *nvp, int slot)
4311 {
4312 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4313 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4314 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4315 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4316 
4317 	nv_program_taskfile_regs(nvp, slot);
4318 
4319 	/*
4320 	 * This next one sets the controller in motion
4321 	 */
4322 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4323 
4324 	return (SATA_TRAN_ACCEPTED);
4325 }
4326 
4327 
4328 static int
4329 nv_bm_status_clear(nv_port_t *nvp)
4330 {
4331 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4332 	uchar_t	status, ret;
4333 
4334 	/*
4335 	 * Get the current BM status
4336 	 */
4337 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4338 
4339 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4340 
4341 	/*
4342 	 * Clear the latches (and preserve the other bits)
4343 	 */
4344 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4345 
4346 	return (ret);
4347 }
4348 
4349 
4350 /*
4351  * program the bus master DMA engine with the PRD address for
4352  * the active slot command, and start the DMA engine.
4353  */
4354 static void
4355 nv_start_dma_engine(nv_port_t *nvp, int slot)
4356 {
4357 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4358 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4359 	uchar_t direction;
4360 
4361 	ASSERT(nv_slotp->nvslot_spkt != NULL);
4362 
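	/*
	 * The bus master direction bit is expressed from the DMA engine's
	 * point of view: a read from the device means the engine writes
	 * to host memory, and a write to the device means it reads from
	 * host memory.
	 */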
4363 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4364 	    == SATA_DIR_READ) {
4365 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4366 	} else {
4367 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4368 	}
4369 
4370 	NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4371 	    "nv_start_dma_engine entered", NULL);
4372 
4373 #if NOT_USED
4374 	/*
	 * NOT NEEDED.  Left here for historical reasons.
4376 	 * Reset the controller's interrupt and error status bits.
4377 	 */
4378 	(void) nv_bm_status_clear(nvp);
4379 #endif
4380 	/*
4381 	 * program the PRD table physical start address
4382 	 */
4383 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4384 
4385 	/*
4386 	 * set the direction control and start the DMA controller
4387 	 */
4388 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4389 }
4390 
4391 /*
4392  * start dma command, either in or out
4393  */
4394 static int
4395 nv_start_dma(nv_port_t *nvp, int slot)
4396 {
4397 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4398 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4399 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4400 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4401 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4402 #ifdef NCQ
4403 	uint8_t ncq = B_FALSE;
4404 #endif
4405 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4406 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4407 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4408 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4409 
4410 	ASSERT(sg_count != 0);
4411 
4412 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4413 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4414 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4415 		    sata_cmdp->satacmd_num_dma_cookies);
4416 
4417 		return (NV_FAILURE);
4418 	}
4419 
4420 	nv_program_taskfile_regs(nvp, slot);
4421 
4422 	/*
4423 	 * start the drive in motion
4424 	 */
4425 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4426 
4427 	/*
4428 	 * the drive starts processing the transaction when the cmd register
4429 	 * is written.  This is done here before programming the DMA engine to
4430 	 * parallelize and save some time.  In the event that the drive is ready
4431 	 * before DMA, it will wait.
4432 	 */
4433 #ifdef NCQ
4434 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4435 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4436 		ncq = B_TRUE;
4437 	}
4438 #endif
4439 
4440 	/*
4441 	 * copy the PRD list to PRD table in DMA accessible memory
4442 	 * so that the controller can access it.
4443 	 */
4444 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4445 		uint32_t size;
4446 
4447 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4448 
4449 		/* Set the number of bytes to transfer, 0 implies 64KB */
4450 		size = srcp->dmac_size;
4451 		if (size == 0x10000)
4452 			size = 0;
4453 
4454 		/*
4455 		 * If this is a 40-bit address, copy bits 32-40 of the
4456 		 * physical address to bits 16-24 of the PRD count.
4457 		 */
4458 		if (srcp->dmac_laddress > UINT32_MAX) {
4459 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4460 		}
4461 
4462 		/*
4463 		 * set the end of table flag for the last entry
4464 		 */
4465 		if (idx == (sg_count - 1)) {
4466 			size |= PRDE_EOT;
4467 		}
4468 
4469 		nv_put32(sghdl, dstp++, size);
4470 	}
4471 
4472 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4473 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4474 
4475 	nv_start_dma_engine(nvp, slot);
4476 
4477 #ifdef NCQ
4478 	/*
4479 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4480 	 * command running.  Preliminary NCQ efforts indicated this needs
4481 	 * more debugging.
4482 	 *
4483 	 * if (nvp->nvp_ncq_run <= 1)
4484 	 */
4485 
4486 	if (ncq == B_FALSE) {
4487 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4488 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4489 		    " cmd = %X", non_ncq_commands++, cmd);
4490 		nv_start_dma_engine(nvp, slot);
4491 	} else {
4492 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4493 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4494 	}
4495 #endif /* NCQ */
4496 
4497 	return (SATA_TRAN_ACCEPTED);
4498 }
4499 
4500 
4501 /*
4502  * start a PIO data-in ATA command
4503  */
4504 static int
4505 nv_start_pio_in(nv_port_t *nvp, int slot)
4506 {
4507 
4508 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4509 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4510 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4511 
4512 	nv_program_taskfile_regs(nvp, slot);
4513 
4514 	/*
4515 	 * This next one sets the drive in motion
4516 	 */
4517 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4518 
4519 	return (SATA_TRAN_ACCEPTED);
4520 }
4521 
4522 
4523 /*
4524  * start a PIO data-out ATA command
4525  */
4526 static int
4527 nv_start_pio_out(nv_port_t *nvp, int slot)
4528 {
4529 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4530 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4531 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4532 
4533 	nv_program_taskfile_regs(nvp, slot);
4534 
4535 	/*
4536 	 * this next one sets the drive in motion
4537 	 */
4538 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4539 
4540 	/*
4541 	 * wait for the busy bit to settle
4542 	 */
4543 	NV_DELAY_NSEC(400);
4544 
4545 	/*
4546 	 * wait for the drive to assert DRQ to send the first chunk
4547 	 * of data. Have to busy wait because there's no interrupt for
4548 	 * the first chunk. This is bad... uses a lot of cycles if the
4549 	 * drive responds too slowly or if the wait loop granularity
4550 	 * is too large. It's even worse if the drive is defective and
4551 	 * the loop times out.
4552 	 */
4553 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4554 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4555 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4556 	    4000000, 0) == B_FALSE) {
4557 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4558 
4559 		goto error;
4560 	}
4561 
4562 	/*
4563 	 * send the first block.
4564 	 */
4565 	nv_intr_pio_out(nvp, nv_slotp);
4566 
4567 	/*
4568 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4569 	 * is OK so far, so return.  Otherwise, fall into error handling
4570 	 * below.
4571 	 */
4572 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4573 
4574 		return (SATA_TRAN_ACCEPTED);
4575 	}
4576 
4577 	error:
4578 	/*
4579 	 * there was an error so reset the device and complete the packet.
4580 	 */
4581 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4582 	nv_complete_io(nvp, spkt, 0);
4583 	nv_reset(nvp, "pio_out");
4584 
4585 	return (SATA_TRAN_PORT_ERROR);
4586 }
4587 
4588 
4589 /*
4590  * start a ATAPI Packet command (PIO data in or out)
4591  */
4592 static int
4593 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4594 {
4595 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4596 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4597 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4598 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4599 
4600 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4601 	    "nv_start_pkt_pio: start", NULL);
4602 
4603 	/*
4604 	 * Write the PACKET command to the command register.  Normally
4605 	 * this would be done through nv_program_taskfile_regs().  It
4606 	 * is done here because some values need to be overridden.
4607 	 */
4608 
4609 	/* select the drive */
4610 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4611 
	/* make certain the drive is selected */
4613 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4614 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4615 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4616 		    "nv_start_pkt_pio: drive select failed", NULL);
4617 		return (SATA_TRAN_PORT_ERROR);
4618 	}
4619 
4620 	/*
4621 	 * The command is always sent via PIO, despite whatever the SATA
4622 	 * common module sets in the command.  Overwrite the DMA bit to do this.
4623 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4624 	 */
4625 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4626 
	/* these registers are set appropriately by the sata common module */
4628 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4629 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4630 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4631 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4632 
4633 	/* initiate the command by writing the command register last */
4634 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4635 
4636 	/* Give the host controller time to do its thing */
4637 	NV_DELAY_NSEC(400);
4638 
4639 	/*
4640 	 * Wait for the device to indicate that it is ready for the command
4641 	 * ATAPI protocol state - HP0: Check_Status_A
4642 	 */
4643 
4644 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4645 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4646 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4647 	    4000000, 0) == B_FALSE) {
4648 		/*
4649 		 * Either an error or device fault occurred or the wait
4650 		 * timed out.  According to the ATAPI protocol, command
4651 		 * completion is also possible.  Other implementations of
4652 		 * this protocol don't handle this last case, so neither
4653 		 * does this code.
4654 		 */
4655 
4656 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4657 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4658 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4659 
4660 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4661 			    "nv_start_pkt_pio: device error (HP0)", NULL);
4662 		} else {
4663 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4664 
4665 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4666 			    "nv_start_pkt_pio: timeout (HP0)", NULL);
4667 		}
4668 
4669 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4670 		nv_complete_io(nvp, spkt, 0);
4671 		nv_reset(nvp, "start_pkt_pio");
4672 
4673 		return (SATA_TRAN_PORT_ERROR);
4674 	}
4675 
4676 	/*
4677 	 * Put the ATAPI command in the data register
4678 	 * ATAPI protocol state - HP1: Send_Packet
4679 	 */
4680 
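	/*
	 * The ACDB is written to the 16-bit data register a word at a
	 * time, so the length is expressed in words (acdb_len >> 1).
	 */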
4681 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4682 	    (ushort_t *)nvp->nvp_data,
4683 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4684 
4685 	/*
4686 	 * See you in nv_intr_pkt_pio.
4687 	 * ATAPI protocol state - HP3: INTRQ_wait
4688 	 */
4689 
4690 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4691 	    "nv_start_pkt_pio: exiting into HP3", NULL);
4692 
4693 	return (SATA_TRAN_ACCEPTED);
4694 }
4695 
4696 
4697 /*
4698  * Interrupt processing for a non-data ATA command.
4699  */
4700 static void
4701 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4702 {
4703 	uchar_t status;
4704 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4705 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4706 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4707 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4708 
4709 	NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4710 
4711 	status = nv_get8(cmdhdl, nvp->nvp_status);
4712 
4713 	/*
4714 	 * check for errors
4715 	 */
4716 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4717 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4718 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4719 		    nvp->nvp_altstatus);
4720 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4721 	} else {
4722 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4723 	}
4724 
4725 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4726 }
4727 
4728 
4729 /*
4730  * ATA command, PIO data in
4731  */
4732 static void
4733 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4734 {
4735 	uchar_t	status;
4736 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4737 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4738 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4739 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4740 	int count;
4741 
4742 	status = nv_get8(cmdhdl, nvp->nvp_status);
4743 
4744 	if (status & SATA_STATUS_BSY) {
4745 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4746 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4747 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4748 		    nvp->nvp_altstatus);
4749 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4750 		nv_reset(nvp, "intr_pio_in");
4751 
4752 		return;
4753 	}
4754 
4755 	/*
4756 	 * check for errors
4757 	 */
4758 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4759 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4760 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4761 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4762 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4763 
4764 		return;
4765 	}
4766 
4767 	/*
4768 	 * read the next chunk of data (if any)
4769 	 */
4770 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4771 
4772 	/*
4773 	 * read count bytes
4774 	 */
4775 	ASSERT(count != 0);
4776 
4777 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4778 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4779 
4780 	nv_slotp->nvslot_v_addr += count;
4781 	nv_slotp->nvslot_byte_count -= count;
4782 
4783 
4784 	if (nv_slotp->nvslot_byte_count != 0) {
4785 		/*
4786 		 * more to transfer.  Wait for next interrupt.
4787 		 */
4788 		return;
4789 	}
4790 
4791 	/*
4792 	 * transfer is complete. wait for the busy bit to settle.
4793 	 */
4794 	NV_DELAY_NSEC(400);
4795 
4796 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4797 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4798 }
4799 
4800 
4801 /*
4802  * ATA command PIO data out
4803  */
4804 static void
4805 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4806 {
4807 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4808 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4809 	uchar_t status;
4810 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4811 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4812 	int count;
4813 
4814 	/*
4815 	 * clear the IRQ
4816 	 */
4817 	status = nv_get8(cmdhdl, nvp->nvp_status);
4818 
4819 	if (status & SATA_STATUS_BSY) {
4820 		/*
4821 		 * this should not happen
4822 		 */
4823 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4824 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4825 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4826 		    nvp->nvp_altstatus);
4827 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4828 
4829 		return;
4830 	}
4831 
4832 	/*
4833 	 * check for errors
4834 	 */
4835 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4836 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4837 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4838 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4839 
4840 		return;
4841 	}
4842 
4843 	/*
4844 	 * this is the condition which signals the drive is
4845 	 * no longer ready to transfer.  Likely that the transfer
4846 	 * completed successfully, but check that byte_count is
4847 	 * zero.
4848 	 */
4849 	if ((status & SATA_STATUS_DRQ) == 0) {
4850 
4851 		if (nv_slotp->nvslot_byte_count == 0) {
4852 			/*
4853 			 * complete; successful transfer
4854 			 */
4855 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4856 		} else {
4857 			/*
4858 			 * error condition, incomplete transfer
4859 			 */
4860 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4861 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4862 		}
4863 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4864 
4865 		return;
4866 	}
4867 
4868 	/*
4869 	 * write the next chunk of data
4870 	 */
4871 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4872 
4873 	/*
4874 	 * read or write count bytes
4875 	 */
4876 
4877 	ASSERT(count != 0);
4878 
4879 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4880 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4881 
4882 	nv_slotp->nvslot_v_addr += count;
4883 	nv_slotp->nvslot_byte_count -= count;
4884 }
4885 
4886 
4887 /*
4888  * ATAPI PACKET command, PIO in/out interrupt
4889  *
4890  * Under normal circumstances, one of four different interrupt scenarios
4891  * will result in this function being called:
4892  *
4893  * 1. Packet command data transfer
4894  * 2. Packet command completion
4895  * 3. Request sense data transfer
4896  * 4. Request sense command completion
4897  */
4898 static void
4899 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4900 {
4901 	uchar_t	status;
4902 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4903 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4904 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4905 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4906 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4907 	uint16_t ctlr_count;
4908 	int count;
4909 
4910 	/* ATAPI protocol state - HP2: Check_Status_B */
4911 
4912 	status = nv_get8(cmdhdl, nvp->nvp_status);
4913 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4914 	    "nv_intr_pkt_pio: status 0x%x", status);
4915 
4916 	if (status & SATA_STATUS_BSY) {
4917 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4918 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4919 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4920 		} else {
4921 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4922 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4923 			nv_reset(nvp, "intr_pkt_pio");
4924 		}
4925 
4926 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4927 		    "nv_intr_pkt_pio: busy - status 0x%x", status);
4928 
4929 		return;
4930 	}
4931 
4932 	if ((status & SATA_STATUS_DF) != 0) {
4933 		/*
		 * On device fault, clean up and bail.  The request sense
		 * data will simply retain its NO SENSE initialized value.
4936 		 */
4937 
4938 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4939 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4940 		}
4941 
4942 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4943 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4944 
4945 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4946 		    nvp->nvp_altstatus);
4947 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4948 		    nvp->nvp_error);
4949 
4950 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4951 		    "nv_intr_pkt_pio: device fault", NULL);
4952 
4953 		return;
4954 	}
4955 
4956 	if ((status & SATA_STATUS_ERR) != 0) {
4957 		/*
4958 		 * On command error, figure out whether we are processing a
4959 		 * request sense.  If so, clean up and bail.  Otherwise,
4960 		 * do a REQUEST SENSE.
4961 		 */
4962 
4963 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4964 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4965 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4966 			    NV_FAILURE) {
4967 				nv_copy_registers(nvp, &spkt->satapkt_device,
4968 				    spkt);
4969 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4970 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4971 			}
4972 
4973 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4974 			    nvp->nvp_altstatus);
4975 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4976 			    nvp->nvp_error);
4977 		} else {
4978 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4979 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4980 
4981 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4982 		}
4983 
4984 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4985 		    "nv_intr_pkt_pio: error (status 0x%x)", status);
4986 
4987 		return;
4988 	}
4989 
4990 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4991 		/*
4992 		 * REQUEST SENSE command processing
4993 		 */
4994 
4995 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4996 			/* ATAPI state - HP4: Transfer_Data */
4997 
4998 			/* read the byte count from the controller */
4999 			ctlr_count =
5000 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5001 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5002 
5003 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5004 			    "nv_intr_pkt_pio: ctlr byte count - %d",
5005 			    ctlr_count);
5006 
5007 			if (ctlr_count == 0) {
5008 				/* no data to transfer - some devices do this */
5009 
5010 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5011 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5012 
5013 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5014 				    "nv_intr_pkt_pio: done (no data)", NULL);
5015 
5016 				return;
5017 			}
5018 
5019 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
5020 
5021 			/* transfer the data */
5022 			ddi_rep_get16(cmdhdl,
5023 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
5024 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5025 			    DDI_DEV_NO_AUTOINCR);
5026 
5027 			/* consume residual bytes */
5028 			ctlr_count -= count;
5029 
5030 			if (ctlr_count > 0) {
5031 				for (; ctlr_count > 0; ctlr_count -= 2)
5032 					(void) ddi_get16(cmdhdl,
5033 					    (ushort_t *)nvp->nvp_data);
5034 			}
5035 
5036 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5037 			    "nv_intr_pkt_pio: transition to HP2", NULL);
5038 		} else {
5039 			/* still in ATAPI state - HP2 */
5040 
5041 			/*
5042 			 * In order to avoid clobbering the rqsense data
5043 			 * set by the SATA common module, the sense data read
5044 			 * from the device is put in a separate buffer and
5045 			 * copied into the packet after the request sense
5046 			 * command successfully completes.
5047 			 */
5048 			bcopy(nv_slotp->nvslot_rqsense_buff,
5049 			    spkt->satapkt_cmd.satacmd_rqsense,
5050 			    SATA_ATAPI_RQSENSE_LEN);
5051 
5052 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5053 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5054 
5055 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5056 			    "nv_intr_pkt_pio: request sense done", NULL);
5057 		}
5058 
5059 		return;
5060 	}
5061 
5062 	/*
5063 	 * Normal command processing
5064 	 */
5065 
5066 	if ((status & (SATA_STATUS_DRQ)) != 0) {
5067 		/* ATAPI protocol state - HP4: Transfer_Data */
5068 
5069 		/* read the byte count from the controller */
5070 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5071 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5072 
5073 		if (ctlr_count == 0) {
5074 			/* no data to transfer - some devices do this */
5075 
5076 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
5077 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5078 
5079 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5080 			    "nv_intr_pkt_pio: done (no data)", NULL);
5081 
5082 			return;
5083 		}
5084 
5085 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
5086 
5087 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5088 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);
5089 
5090 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5091 		    "nv_intr_pkt_pio: byte_count 0x%x",
5092 		    nv_slotp->nvslot_byte_count);
5093 
5094 		/* transfer the data */
5095 
5096 		if (direction == SATA_DIR_READ) {
5097 			ddi_rep_get16(cmdhdl,
5098 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5099 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5100 			    DDI_DEV_NO_AUTOINCR);
5101 
5102 			ctlr_count -= count;
5103 
5104 			if (ctlr_count > 0) {
5105 				/* consume remaining bytes */
5106 
5107 				for (; ctlr_count > 0;
5108 				    ctlr_count -= 2)
5109 					(void) ddi_get16(cmdhdl,
5110 					    (ushort_t *)nvp->nvp_data);
5111 
5112 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5113 				    "nv_intr_pkt_pio: bytes remained", NULL);
5114 			}
5115 		} else {
5116 			ddi_rep_put16(cmdhdl,
5117 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5118 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5119 			    DDI_DEV_NO_AUTOINCR);
5120 		}
5121 
5122 		nv_slotp->nvslot_v_addr += count;
5123 		nv_slotp->nvslot_byte_count -= count;
5124 
5125 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5126 		    "nv_intr_pkt_pio: transition to HP2", NULL);
5127 	} else {
5128 		/* still in ATAPI state - HP2 */
5129 
5130 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
5131 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5132 
5133 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5134 		    "nv_intr_pkt_pio: done", NULL);
5135 	}
5136 }
5137 
5138 
5139 /*
5140  * ATA command, DMA data in/out
5141  */
5142 static void
5143 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5144 {
5145 	uchar_t status;
5146 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5147 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5148 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5149 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5150 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5151 	uchar_t	bmicx;
5152 	uchar_t bm_status;
5153 
5154 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5155 
5156 	/*
5157 	 * stop DMA engine.
5158 	 */
5159 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5160 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5161 
5162 	/*
5163 	 * get the status and clear the IRQ, and check for DMA error
5164 	 */
5165 	status = nv_get8(cmdhdl, nvp->nvp_status);
5166 
5167 	/*
5168 	 * check for drive errors
5169 	 */
5170 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5171 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5172 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5173 		(void) nv_bm_status_clear(nvp);
5174 
5175 		return;
5176 	}
5177 
5178 	bm_status = nv_bm_status_clear(nvp);
5179 
5180 	/*
5181 	 * check for bus master errors
5182 	 */
5183 
5184 	if (bm_status & BMISX_IDERR) {
5185 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
5186 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5187 		    nvp->nvp_altstatus);
5188 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5189 		nv_reset(nvp, "intr_dma");
5190 
5191 		return;
5192 	}
5193 
5194 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5195 }
5196 
5197 
5198 /*
5199  * Wait for a register of a controller to achieve a specific state.
5200  * To return normally, all the bits in the first sub-mask must be ON,
5201  * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return B_FALSE; otherwise return B_TRUE.
5204  *
5205  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5206  * occur for the first 250 us, then switch over to a sleeping wait.
5207  *
5208  */
5209 int
5210 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5211     int type_wait)
5212 {
5213 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5214 	hrtime_t end, cur, start_sleep, start;
5215 	int first_time = B_TRUE;
5216 	ushort_t val;
5217 
5218 	for (;;) {
5219 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5220 
5221 		if ((val & onbits) == onbits && (val & offbits) == 0) {
5222 
5223 			return (B_TRUE);
5224 		}
5225 
5226 		cur = gethrtime();
5227 
5228 		/*
5229 		 * store the start time and calculate the end
5230 		 * time.  also calculate "start_sleep" which is
5231 		 * the point after which the driver will stop busy
5232 		 * waiting and change to sleep waiting.
5233 		 */
5234 		if (first_time) {
5235 			first_time = B_FALSE;
5236 			/*
5237 			 * start and end are in nanoseconds
5238 			 */
5239 			start = cur;
5240 			end = start + timeout_usec * 1000;
5241 			/*
5242 			 * add 1 ms to start
5243 			 */
5244 			start_sleep =  start + 250000;
5245 
5246 			if (servicing_interrupt()) {
5247 				type_wait = NV_NOSLEEP;
5248 			}
5249 		}
5250 
5251 		if (cur > end) {
5252 
5253 			break;
5254 		}
5255 
5256 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5257 #if ! defined(__lock_lint)
5258 			delay(1);
5259 #endif
5260 		} else {
5261 			drv_usecwait(nv_usec_delay);
5262 		}
5263 	}
5264 
5265 	return (B_FALSE);
5266 }
5267 
5268 
5269 /*
 * This is a slightly more complicated version of nv_wait() that checks
 * for error conditions and bails out early rather than looping until
 * the timeout is exceeded.
5273  *
5274  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5275  * occur for the first 250 us, then switch over to a sleeping wait.
5276  */
5277 int
5278 nv_wait3(
5279 	nv_port_t	*nvp,
5280 	uchar_t		onbits1,
5281 	uchar_t		offbits1,
5282 	uchar_t		failure_onbits2,
5283 	uchar_t		failure_offbits2,
5284 	uchar_t		failure_onbits3,
5285 	uchar_t		failure_offbits3,
5286 	uint_t		timeout_usec,
5287 	int		type_wait)
5288 {
5289 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5290 	hrtime_t end, cur, start_sleep, start;
5291 	int first_time = B_TRUE;
5292 	ushort_t val;
5293 
5294 	for (;;) {
5295 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5296 
5297 		/*
5298 		 * check for expected condition
5299 		 */
5300 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5301 
5302 			return (B_TRUE);
5303 		}
5304 
5305 		/*
5306 		 * check for error conditions
5307 		 */
5308 		if ((val & failure_onbits2) == failure_onbits2 &&
5309 		    (val & failure_offbits2) == 0) {
5310 
5311 			return (B_FALSE);
5312 		}
5313 
5314 		if ((val & failure_onbits3) == failure_onbits3 &&
5315 		    (val & failure_offbits3) == 0) {
5316 
5317 			return (B_FALSE);
5318 		}
5319 
5320 		/*
5321 		 * store the start time and calculate the end
5322 		 * time.  also calculate "start_sleep" which is
5323 		 * the point after which the driver will stop busy
5324 		 * waiting and change to sleep waiting.
5325 		 */
5326 		if (first_time) {
5327 			first_time = B_FALSE;
5328 			/*
5329 			 * start and end are in nanoseconds
5330 			 */
5331 			cur = start = gethrtime();
5332 			end = start + timeout_usec * 1000;
5333 			/*
5334 			 * add 1 ms to start
5335 			 */
5336 			start_sleep =  start + 250000;
5337 
5338 			if (servicing_interrupt()) {
5339 				type_wait = NV_NOSLEEP;
5340 			}
5341 		} else {
5342 			cur = gethrtime();
5343 		}
5344 
5345 		if (cur > end) {
5346 
5347 			break;
5348 		}
5349 
5350 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5351 #if ! defined(__lock_lint)
5352 			delay(1);
5353 #endif
5354 		} else {
5355 			drv_usecwait(nv_usec_delay);
5356 		}
5357 	}
5358 
5359 	return (B_FALSE);
5360 }
5361 
5362 
5363 /*
5364  * nv_port_state_change() reports the state of the port to the
5365  * sata module by calling sata_hba_event_notify().  This
5366  * function is called any time the state of the port is changed
5367  */
5368 static void
5369 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5370 {
5371 	sata_device_t sd;
5372 
5373 	NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5374 	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5375 	    "lbolt %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5376 
5377 	if (ddi_in_panic() != 0) {
5378 
5379 		return;
5380 	}
5381 
5382 	bzero((void *)&sd, sizeof (sata_device_t));
5383 	sd.satadev_rev = SATA_DEVICE_REV;
5384 	nv_copy_registers(nvp, &sd, NULL);
5385 
5386 	/*
5387 	 * When NCQ is implemented sactive and snotific field need to be
5388 	 * updated.
5389 	 */
5390 	sd.satadev_addr.cport = nvp->nvp_port_num;
5391 	sd.satadev_addr.qual = addr_type;
5392 	sd.satadev_state = state;
5393 
5394 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5395 }
5396 
5397 
5398 /*
5399  * Monitor reset progress and signature gathering.
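 * Returns the delay that nv_timeout() should use before polling again
 * (its next_timeout_us), or 0 when reset processing is finished.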
5400  */
5401 static clock_t
5402 nv_monitor_reset(nv_port_t *nvp)
5403 {
5404 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5405 	uint32_t sstatus;
5406 
5407 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
5408 
5409 	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5410 
5411 	/*
5412 	 * Check the link status. The link needs to be active before
5413 	 * checking the link's status.
5414 	 */
5415 	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5416 	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5417 		/*
5418 		 * Either link is not active or there is no device
5419 		 * If the link remains down for more than NV_LINK_EVENT_DOWN
5420 		 * (milliseconds), abort signature acquisition and complete
5421 		 * reset processing.  The link will go down when COMRESET is
5422 		 * sent by nv_reset().
5423 		 */
5424 
5425 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5426 		    NV_LINK_EVENT_DOWN) {
5427 
5428 			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5429 			    "nv_monitor_reset: no link - ending signature "
5430 			    "acquisition; time after reset %ldms",
5431 			    TICK_TO_MSEC(ddi_get_lbolt() -
5432 			    nvp->nvp_reset_time));
5433 
5434 			DTRACE_PROBE(no_link_reset_giving_up_f);
5435 
5436 			/*
5437 			 * If the drive was previously present and configured
5438 			 * and then subsequently removed, then send a removal
5439 			 * event to sata common module.
5440 			 */
5441 			if (nvp->nvp_type != SATA_DTYPE_NONE) {
5442 				nv_port_state_change(nvp,
5443 				    SATA_EVNT_DEVICE_DETACHED,
5444 				    SATA_ADDR_CPORT, 0);
5445 			}
5446 
5447 			nvp->nvp_type = SATA_DTYPE_NONE;
5448 			nvp->nvp_signature = NV_NO_SIG;
5449 			nvp->nvp_state &= ~(NV_DEACTIVATED);
5450 
5451 #ifdef SGPIO_SUPPORT
5452 			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5453 			    SGP_CTLR_PORT_TO_DRV(
5454 			    nvp->nvp_ctlp->nvc_ctlr_num,
5455 			    nvp->nvp_port_num));
5456 #endif
5457 
5458 			cv_signal(&nvp->nvp_reset_cv);
5459 
5460 			return (0);
5461 		}
5462 
5463 		DTRACE_PROBE(link_lost_reset_keep_trying_p);
5464 
5465 		return (nvp->nvp_wait_sig);
5466 	}
5467 
5468 	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5469 	    "nv_monitor_reset: link up.  time since reset %ldms",
5470 	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));
5471 
5472 	nv_read_signature(nvp);
5473 
5474 
5475 	if (nvp->nvp_signature != NV_NO_SIG) {
5476 		/*
5477 		 * signature has been acquired, send the appropriate
5478 		 * event to the sata common module.
5479 		 */
5480 		if (nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) {
5481 			char *source;
5482 
5483 			if (nvp->nvp_state & NV_HOTPLUG) {
5484 
5485 				source = "hotplugged";
5486 				nv_port_state_change(nvp,
5487 				    SATA_EVNT_DEVICE_ATTACHED,
5488 				    SATA_ADDR_CPORT, SATA_DSTATE_PWR_ACTIVE);
5489 				DTRACE_PROBE1(got_sig_for_hotplugged_device_h,
5490 				    int, nvp->nvp_state);
5491 
5492 			} else {
5493 				source = "activated or attached";
5494 				DTRACE_PROBE1(got_sig_for_existing_device_h,
5495 				    int, nvp->nvp_state);
5496 			}
5497 
5498 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5499 			    "signature acquired for %s device. sig:"
5500 			    " 0x%x state: 0x%x nvp_type: 0x%x", source,
5501 			    nvp->nvp_signature, nvp->nvp_state, nvp->nvp_type);
5502 
5503 
5504 			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);
5505 
5506 #ifdef SGPIO_SUPPORT
5507 			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5508 				nv_sgp_drive_connect(nvp->nvp_ctlp,
5509 				    SGP_CTLR_PORT_TO_DRV(
5510 				    nvp->nvp_ctlp->nvc_ctlr_num,
5511 				    nvp->nvp_port_num));
5512 			} else {
5513 				nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5514 				    SGP_CTLR_PORT_TO_DRV(
5515 				    nvp->nvp_ctlp->nvc_ctlr_num,
5516 				    nvp->nvp_port_num));
5517 			}
5518 #endif
5519 
5520 			cv_signal(&nvp->nvp_reset_cv);
5521 
5522 			return (0);
5523 		}
5524 
5525 		/*
5526 		 * Since this was not an attach, it was a reset of an
5527 		 * existing device
5528 		 */
5529 		nvp->nvp_state &= ~NV_RESET;
5530 		nvp->nvp_state |= NV_RESTORE;
5531 
5532 
5533 
5534 		DTRACE_PROBE(got_signature_reset_complete_p);
5535 		DTRACE_PROBE1(nvp_signature_h, int, nvp->nvp_signature);
5536 		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
5537 
5538 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5539 		    "signature acquired reset complete. sig: 0x%x"
5540 		    " state: 0x%x", nvp->nvp_signature, nvp->nvp_state);
5541 
5542 		/*
5543 		 * interrupts may have been disabled so just make sure
5544 		 * they are cleared and re-enabled.
5545 		 */
5546 
5547 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5548 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5549 
5550 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5551 		    SATA_ADDR_DCPORT,
5552 		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5553 
5554 		return (0);
5555 	}
5556 
5557 
5558 	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >
5559 	    NV_RETRY_RESET_SIG) {
5560 
5561 
5562 		if (nvp->nvp_reset_retry_count >= NV_MAX_RESET_RETRY) {
5563 
5564 			nvp->nvp_state |= NV_FAILED;
5565 			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);
5566 
5567 			DTRACE_PROBE(reset_exceeded_waiting_for_sig_p);
5568 			DTRACE_PROBE(reset_exceeded_waiting_for_sig_f);
5569 			DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
5570 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5571 			    "reset time exceeded waiting for sig nvp_state %x",
5572 			    nvp->nvp_state);
5573 
5574 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
5575 			    SATA_ADDR_CPORT, 0);
5576 
5577 			cv_signal(&nvp->nvp_reset_cv);
5578 
5579 			return (0);
5580 		}
5581 
5582 		nv_reset(nvp, "retry");
5583 
5584 		return (nvp->nvp_wait_sig);
5585 	}
5586 
5587 	/*
5588 	 * signature not received, keep trying
5589 	 */
5590 	DTRACE_PROBE(no_sig_keep_waiting_p);
5591 
5592 	/*
5593 	 * double the wait time for sig since the last try but cap it off at
5594 	 * 1 second.
5595 	 */
5596 	nvp->nvp_wait_sig = nvp->nvp_wait_sig * 2;
5597 
5598 	return (nvp->nvp_wait_sig > NV_ONE_SEC ? NV_ONE_SEC :
5599 	    nvp->nvp_wait_sig);
5600 }
5601 
5602 
5603 /*
5604  * timeout processing:
5605  *
5606  * Check if any packets have crossed a timeout threshold.  If so,
5607  * abort the packet.  This function is not NCQ-aware.
5608  *
5609  * If reset is in progress, call reset monitoring function.
5610  *
5611  * Timeout frequency may be lower for checking packet timeout
5612  * and higher for reset monitoring.
5613  *
5614  */
5615 static void
5616 nv_timeout(void *arg)
5617 {
5618 	nv_port_t *nvp = arg;
5619 	nv_slot_t *nv_slotp;
5620 	clock_t next_timeout_us = NV_ONE_SEC;
5621 	uint16_t int_status;
5622 	uint8_t status, bmstatus;
5623 	static int intr_warn_once = 0;
5624 	uint32_t serror;
5625 
5626 
5627 	ASSERT(nvp != NULL);
5628 
5629 	mutex_enter(&nvp->nvp_mutex);
5630 	nvp->nvp_timeout_id = 0;
5631 
5632 	if (nvp->nvp_state & (NV_DEACTIVATED|NV_FAILED)) {
5633 		next_timeout_us = 0;
5634 
5635 		goto finished;
5636 	}
5637 
5638 	if (nvp->nvp_state & NV_RESET) {
5639 		next_timeout_us = nv_monitor_reset(nvp);
5640 
5641 		goto finished;
5642 	}
5643 
5644 	if (nvp->nvp_state & NV_LINK_EVENT) {
5645 		boolean_t device_present = B_FALSE;
5646 		uint32_t sstatus;
5647 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5648 
5649 		if (TICK_TO_USEC(ddi_get_lbolt() -
5650 		    nvp->nvp_link_event_time) < NV_LINK_EVENT_SETTLE) {
5651 
5652 			next_timeout_us = 10 * NV_ONE_MSEC;
5653 
5654 			DTRACE_PROBE(link_event_set_no_timeout_keep_waiting_p);
5655 
5656 			goto finished;
5657 		}
5658 
5659 		DTRACE_PROBE(link_event_settled_now_process_p);
5660 
5661 		nvp->nvp_state &= ~NV_LINK_EVENT;
5662 
5663 		/*
5664 		 * ck804 routinely reports the wrong hotplug/unplug event,
5665 		 * and it's been seen on mcp55 when there are signal integrity
5666 		 * issues.  Therefore need to infer the event from the
5667 		 * current link status.
5668 		 */
5669 
5670 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5671 
5672 		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
5673 		    (SSTATUS_GET_DET(sstatus) ==
5674 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5675 			device_present = B_TRUE;
5676 		}
5677 
5678 		if ((nvp->nvp_signature != NV_NO_SIG) &&
5679 		    (device_present == B_FALSE)) {
5680 
5681 			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5682 			    "nv_timeout: device detached", NULL);
5683 
5684 			DTRACE_PROBE(device_detached_p);
5685 
5686 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5687 			    B_FALSE);
5688 
5689 			nv_port_state_change(nvp, SATA_EVNT_DEVICE_DETACHED,
5690 			    SATA_ADDR_CPORT, 0);
5691 
5692 			nvp->nvp_signature = NV_NO_SIG;
5693 			nvp->nvp_rem_time = ddi_get_lbolt();
5694 			nvp->nvp_type = SATA_DTYPE_NONE;
5695 			next_timeout_us = 0;
5696 
5697 #ifdef SGPIO_SUPPORT
5698 			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5699 			    SGP_CTLR_PORT_TO_DRV(nvp->nvp_ctlp->nvc_ctlr_num,
5700 			    nvp->nvp_port_num));
5701 #endif
5702 
5703 			goto finished;
5704 		}
5705 
5706 		/*
5707 		 * if the device was already present, and it's still present,
5708 		 * then abort any outstanding command and issue a reset.
5709 		 * This may result from transient link errors.
5710 		 */
5711 
5712 		if ((nvp->nvp_signature != NV_NO_SIG) &&
5713 		    (device_present == B_TRUE)) {
5714 
5715 			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5716 			    "nv_timeout: spurious link event", NULL);
5717 			DTRACE_PROBE(spurious_link_event_p);
5718 
5719 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5720 			    B_FALSE);
5721 
5722 			nvp->nvp_signature = NV_NO_SIG;
5723 			nvp->nvp_trans_link_time = ddi_get_lbolt();
5724 			nvp->nvp_trans_link_count++;
5725 			next_timeout_us = 0;
5726 
5727 			nv_reset(nvp, "transient link event");
5728 
5729 			goto finished;
5730 		}
5731 
5732 
5733 		/*
5734 		 * a new device has been inserted
5735 		 */
5736 		if ((nvp->nvp_signature == NV_NO_SIG) &&
5737 		    (device_present == B_TRUE)) {
5738 			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5739 			    "nv_timeout: device attached", NULL);
5740 
5741 			DTRACE_PROBE(device_attached_p);
5742 			nvp->nvp_add_time = ddi_get_lbolt();
5743 			next_timeout_us = 0;
5744 			nvp->nvp_reset_count = 0;
5745 			nvp->nvp_state = NV_HOTPLUG;
5746 			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5747 			nv_reset(nvp, "hotplug");
5748 
5749 			goto finished;
5750 		}
5751 
5752 		/*
5753 		 * no link, and no prior device.  Nothing to do, but
5754 		 * log this.
5755 		 */
5756 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5757 		    "nv_timeout: delayed hot processing no link no prior"
5758 		    " device", NULL);
5759 		DTRACE_PROBE(delayed_hotplug_no_link_no_prior_device_p);
5760 
5761 		nvp->nvp_trans_link_time = ddi_get_lbolt();
5762 		nvp->nvp_trans_link_count++;
5763 		next_timeout_us = 0;
5764 
5765 		goto finished;
5766 	}
5767 
5768 	/*
5769 	 * Not yet NCQ-aware - there is only one command active.
5770 	 */
5771 	nv_slotp = &(nvp->nvp_slot[0]);
5772 
5773 	/*
5774 	 * perform timeout checking and processing only if there is an
5775 	 * active packet on the port
5776 	 */
5777 	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5778 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5779 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5780 		uint8_t cmd = satacmd->satacmd_cmd_reg;
5781 		uint64_t lba;
5782 
5783 #if ! defined(__lock_lint) && defined(DEBUG)
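		/*
		 * Reconstruct the 48-bit LBA from the taskfile registers;
		 * it is only used in the messages logged below.
		 */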
5784 
5785 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5786 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5787 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5788 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5789 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5790 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5791 #endif
5792 
5793 		/*
5794 		 * timeout not needed if there is a polling thread
5795 		 */
5796 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5797 			next_timeout_us = 0;
5798 
5799 			goto finished;
5800 		}
5801 
5802 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5803 		    spkt->satapkt_time) {
5804 
5805 			serror = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5806 			    nvp->nvp_serror);
5807 			status = nv_get8(nvp->nvp_ctl_hdl,
5808 			    nvp->nvp_altstatus);
5809 			bmstatus = nv_get8(nvp->nvp_bm_hdl,
5810 			    nvp->nvp_bmisx);
5811 
5812 			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5813 			    "nv_timeout: aborting: "
5814 			    "nvslot_stime: %ld max ticks till timeout: %ld "
5815 			    "cur_time: %ld cmd = 0x%x lba = %d seq = %d",
5816 			    nv_slotp->nvslot_stime,
5817 			    drv_usectohz(MICROSEC *
5818 			    spkt->satapkt_time), ddi_get_lbolt(),
5819 			    cmd, lba, nvp->nvp_seq);
5820 
5821 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5822 			    "nv_timeout: altstatus = 0x%x  bmicx = 0x%x "
5823 			    "serror = 0x%x previous_cmd = "
5824 			    "0x%x", status, bmstatus, serror,
5825 			    nvp->nvp_previous_cmd);
5826 
5827 
5828 			DTRACE_PROBE1(nv_timeout_packet_p, int, nvp);
5829 
5830 			if (nvp->nvp_mcp5x_int_status != NULL) {
5831 
5832 				int_status = nv_get16(
5833 				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5834 				    nvp->nvp_mcp5x_int_status);
5835 				NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5836 				    "int_status = 0x%x", int_status);
5837 
5838 				if (int_status & MCP5X_INT_COMPLETE) {
5839 					/*
5840 					 * Completion interrupt was missed.
5841 					 * Issue warning message once.
5842 					 */
5843 					if (!intr_warn_once) {
5844 
5845 						nv_cmn_err(CE_WARN,
5846 						    nvp->nvp_ctlp,
5847 						    nvp,
5848 						    "nv_sata: missing command "
5849 						    "completion interrupt");
5850 						intr_warn_once = 1;
5851 
5852 					}
5853 
5854 					NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5855 					    nvp, "timeout detected with "
5856 					    "interrupt ready - calling "
5857 					    "int directly", NULL);
5858 
5859 					mutex_exit(&nvp->nvp_mutex);
5860 					(void) mcp5x_intr_port(nvp);
5861 					mutex_enter(&nvp->nvp_mutex);
5862 
5863 				} else {
5864 					/*
5865 					 * True timeout and not a missing
5866 					 * interrupt.
5867 					 */
5868 					DTRACE_PROBE1(timeout_abort_active_p,
5869 					    int *, nvp);
5870 					(void) nv_abort_active(nvp, spkt,
5871 					    SATA_PKT_TIMEOUT, B_TRUE);
5872 				}
5873 			} else {
5874 				(void) nv_abort_active(nvp, spkt,
5875 				    SATA_PKT_TIMEOUT, B_TRUE);
5876 			}
5877 
5878 		} else {
5879 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5880 			    "nv_timeout:"
5881 			    " still in use so restarting timeout",
5882 			    NULL);
5883 
5884 			next_timeout_us = NV_ONE_SEC;
5885 		}
5886 	} else {
5887 		/*
5888 		 * there was no active packet, so do not re-enable timeout
5889 		 */
5890 		next_timeout_us = 0;
5891 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5892 		    "nv_timeout: no active packet so not re-arming "
5893 		    "timeout", NULL);
5894 	}
5895 
5896 finished:
5897 
5898 	nv_setup_timeout(nvp, next_timeout_us);
5899 
5900 	mutex_exit(&nvp->nvp_mutex);
5901 }
5902 
5903 
5904 /*
5905  * enable or disable the 3 interrupt types the driver is
5906  * interested in: completion, add and remove.
5907  */
5908 static void
5909 ck804_set_intr(nv_port_t *nvp, int flag)
5910 {
5911 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5912 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5913 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
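	/*
	 * Interrupt enable and status bits for each port, indexed by the
	 * port number.
	 */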
5914 	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5915 	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5916 	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5917 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
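
	/*
	 * The non-blocking variant takes no locks so that it can be used
	 * from contexts where blocking is not allowed, such as nv_quiesce().
	 */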
5918 
5919 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5920 		int_en = nv_get8(bar5_hdl,
5921 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5922 		int_en &= ~intr_bits[port];
5923 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5924 		    int_en);
5925 		return;
5926 	}
5927 
5928 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5929 
5930 	/*
5931 	 * controller level lock also required since access to an 8-bit
5932 	 * interrupt register is shared between both channels.
5933 	 */
5934 	mutex_enter(&nvc->nvc_mutex);
5935 
5936 	if (flag & NV_INTR_CLEAR_ALL) {
5937 		NVLOG(NVDBG_INTR, nvc, nvp,
5938 		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);
5939 
5940 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5941 		    (uint8_t *)(nvc->nvc_ck804_int_status));
5942 
5943 		if (intr_status & clear_all_bits[port]) {
5944 
5945 			nv_put8(nvc->nvc_bar_hdl[5],
5946 			    (uint8_t *)(nvc->nvc_ck804_int_status),
5947 			    clear_all_bits[port]);
5948 
5949 			NVLOG(NVDBG_INTR, nvc, nvp,
5950 			    "interrupt bits cleared %x",
5951 			    intr_status & clear_all_bits[port]);
5952 		}
5953 	}
5954 
5955 	if (flag & NV_INTR_DISABLE) {
5956 		NVLOG(NVDBG_INTR, nvc, nvp,
5957 		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
5958 		int_en = nv_get8(bar5_hdl,
5959 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5960 		int_en &= ~intr_bits[port];
5961 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5962 		    int_en);
5963 	}
5964 
5965 	if (flag & NV_INTR_ENABLE) {
5966 		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
5967 		    NULL);
5968 		int_en = nv_get8(bar5_hdl,
5969 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5970 		int_en |= intr_bits[port];
5971 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5972 		    int_en);
5973 	}
5974 
5975 	mutex_exit(&nvc->nvc_mutex);
5976 }
5977 
5978 
5979 /*
5980  * enable or disable the 3 interrupts the driver is interested in:
5981  * completion interrupt, hot add, and hot remove interrupt.
5982  */
5983 static void
5984 mcp5x_set_intr(nv_port_t *nvp, int flag)
5985 {
5986 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5987 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5988 	uint16_t intr_bits =
5989 	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5990 	uint16_t int_en;
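
	/* non-blocking variant: no locks are taken; see nv_quiesce() */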
5991 
5992 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5993 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5994 		int_en &= ~intr_bits;
5995 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5996 		return;
5997 	}
5998 
5999 	ASSERT(mutex_owned(&nvp->nvp_mutex));
6000 
	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: enter flag: %d", flag);
6002 
6003 	if (flag & NV_INTR_CLEAR_ALL) {
6004 		NVLOG(NVDBG_INTR, nvc, nvp,
6005 		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
6006 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
6007 	}
6008 
6009 	if (flag & NV_INTR_ENABLE) {
6010 		NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
6011 		    NULL);
6012 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6013 		int_en |= intr_bits;
6014 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6015 	}
6016 
6017 	if (flag & NV_INTR_DISABLE) {
6018 		NVLOG(NVDBG_INTR, nvc, nvp,
6019 		    "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
6020 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6021 		int_en &= ~intr_bits;
6022 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6023 	}
6024 }
6025 
6026 
6027 static void
6028 nv_resume(nv_port_t *nvp)
6029 {
6030 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
6031 
6032 	mutex_enter(&nvp->nvp_mutex);
6033 
6034 	if (nvp->nvp_state & NV_DEACTIVATED) {
6035 		mutex_exit(&nvp->nvp_mutex);
6036 
6037 		return;
6038 	}
6039 
6040 	/* Enable interrupt */
6041 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
6042 
6043 	/*
6044 	 * Power may have been removed to the port and the
6045 	 * drive, and/or a drive may have been added or removed.
6046 	 * Force a reset which will cause a probe and re-establish
6047 	 * any state needed on the drive.
6048 	 */
6049 	nv_reset(nvp, "resume");
6050 
6051 	mutex_exit(&nvp->nvp_mutex);
6052 }
6053 
6054 
6055 static void
6056 nv_suspend(nv_port_t *nvp)
6057 {
6058 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
6059 
6060 	mutex_enter(&nvp->nvp_mutex);
6061 
6062 #ifdef SGPIO_SUPPORT
6063 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
6064 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6065 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6066 	}
6067 #endif
6068 
6069 	if (nvp->nvp_state & NV_DEACTIVATED) {
6070 		mutex_exit(&nvp->nvp_mutex);
6071 
6072 		return;
6073 	}
6074 
6075 	/*
6076 	 * Stop the timeout handler.
6077 	 * (It will be restarted in nv_reset() during nv_resume().)
6078 	 */
6079 	if (nvp->nvp_timeout_id) {
6080 		(void) untimeout(nvp->nvp_timeout_id);
6081 		nvp->nvp_timeout_id = 0;
6082 	}
6083 
6084 	/* Disable interrupt */
6085 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6086 	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6087 
6088 	mutex_exit(&nvp->nvp_mutex);
6089 }
6090 
6091 
6092 static void
6093 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6094 {
6095 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6096 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
6097 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6098 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6099 	uchar_t status;
6100 	struct sata_cmd_flags flags;
6101 
6102 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6103 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6104 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6105 
6106 	if (spkt == NULL) {
6107 
6108 		return;
6109 	}
6110 
6111 	/*
6112 	 * in the error case, implicitly set the return of regs needed
6113 	 * for error handling.
6114 	 */
6115 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6116 	    nvp->nvp_altstatus);
6117 
6118 	flags = scmd->satacmd_flags;
6119 
6120 	if (status & SATA_STATUS_ERR) {
6121 		flags.sata_copy_out_lba_low_msb = B_TRUE;
6122 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
6123 		flags.sata_copy_out_lba_high_msb = B_TRUE;
6124 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
6125 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6126 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
6127 		flags.sata_copy_out_error_reg = B_TRUE;
6128 		flags.sata_copy_out_sec_count_msb = B_TRUE;
6129 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
6130 		scmd->satacmd_status_reg = status;
6131 	}
6132 
6133 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6134 
6135 		/*
6136 		 * set HOB so that high byte will be read
6137 		 */
6138 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6139 
6140 		/*
6141 		 * get the requested high bytes
6142 		 */
6143 		if (flags.sata_copy_out_sec_count_msb) {
6144 			scmd->satacmd_sec_count_msb =
6145 			    nv_get8(cmdhdl, nvp->nvp_count);
6146 		}
6147 
6148 		if (flags.sata_copy_out_lba_low_msb) {
6149 			scmd->satacmd_lba_low_msb =
6150 			    nv_get8(cmdhdl, nvp->nvp_sect);
6151 		}
6152 
6153 		if (flags.sata_copy_out_lba_mid_msb) {
6154 			scmd->satacmd_lba_mid_msb =
6155 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
6156 		}
6157 
6158 		if (flags.sata_copy_out_lba_high_msb) {
6159 			scmd->satacmd_lba_high_msb =
6160 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
6161 		}
6162 	}
6163 
6164 	/*
6165 	 * disable HOB so that low byte is read
6166 	 */
6167 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6168 
6169 	/*
6170 	 * get the requested low bytes
6171 	 */
6172 	if (flags.sata_copy_out_sec_count_lsb) {
6173 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6174 	}
6175 
6176 	if (flags.sata_copy_out_lba_low_lsb) {
6177 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6178 	}
6179 
6180 	if (flags.sata_copy_out_lba_mid_lsb) {
6181 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6182 	}
6183 
6184 	if (flags.sata_copy_out_lba_high_lsb) {
6185 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6186 	}
6187 
6188 	/*
6189 	 * get the device register if requested
6190 	 */
6191 	if (flags.sata_copy_out_device_reg) {
6192 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6193 	}
6194 
6195 	/*
6196 	 * get the error register if requested
6197 	 */
6198 	if (flags.sata_copy_out_error_reg) {
6199 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6200 	}
6201 }
6202 
6203 
6204 /*
6205  * hot plug and remove interrupts can occur when the device is reset.
6206  * Masking the interrupt doesn't always work well because if a
6207  * different interrupt arrives on the other port, the driver can still
6208  * end up checking the state of the other port and discover the hot
6209  * interrupt flag is set even though it was masked.  Also, when there are
6210  * errors on the link there can be transient link events which need to be
6211  * masked and eliminated as well.
6212  */
6213 static void
6214 nv_link_event(nv_port_t *nvp, int flag)
6215 {
6216 
6217 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_link_event: flag: %s",
6218 	    flag ? "add" : "remove");
6219 
6220 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
6221 
6222 	nvp->nvp_link_event_time = ddi_get_lbolt();
6223 
6224 	/*
6225 	 * if a port has been deactivated, ignore all link events
6226 	 */
6227 	if (nvp->nvp_state & NV_DEACTIVATED) {
6228 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6229 		    " port deactivated", NULL);
6230 		DTRACE_PROBE(ignoring_link_port_deactivated_p);
6231 
6232 		return;
6233 	}
6234 
6235 	/*
6236 	 * if the drive has been reset, ignore any transient events.  If it's
6237 	 * a real removal event, nv_monitor_reset() will handle it.
6238 	 */
6239 	if (nvp->nvp_state & NV_RESET) {
6240 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6241 		    " during reset", NULL);
6242 		DTRACE_PROBE(ignoring_link_event_during_reset_p);
6243 
6244 		return;
6245 	}
6246 
6247 	/*
6248 	 * if link event processing is already enabled, nothing to
6249 	 * do.
6250 	 */
6251 	if (nvp->nvp_state & NV_LINK_EVENT) {
6252 
6253 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6254 		    "received link event while processing already in "
6255 		    "progress", NULL);
6256 		DTRACE_PROBE(nv_link_event_already_set_p);
6257 
6258 		return;
6259 	}
6260 
6261 	DTRACE_PROBE1(link_event_p, int, nvp);
6262 
6263 	nvp->nvp_state |= NV_LINK_EVENT;
6264 
6265 	nv_setup_timeout(nvp, NV_LINK_EVENT_SETTLE);
6266 }
6267 
6268 
6269 /*
 * Get request sense data and stuff it into the command's sense buffer.
6271  * Start a request sense command in order to get sense data to insert
6272  * in the sata packet's rqsense buffer.  The command completion
6273  * processing is in nv_intr_pkt_pio.
6274  *
6275  * The sata common module provides a function to allocate and set-up a
 * request sense packet command.  The reasons it is not used here are:
6277  * a) it cannot be called in an interrupt context and this function is
6278  *    called in an interrupt context.
6279  * b) it allocates DMA resources that are not used here because this is
6280  *    implemented using PIO.
6281  *
6282  * If, in the future, this is changed to use DMA, the sata common module
6283  * should be used to allocate and set-up the error retrieval (request sense)
6284  * command.
6285  */
6286 static int
6287 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6288 {
6289 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6290 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6291 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6292 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6293 
6294 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6295 	    "nv_start_rqsense_pio: start", NULL);
6296 
6297 	/* clear the local request sense buffer before starting the command */
6298 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6299 
6300 	/* Write the request sense PACKET command */
6301 
6302 	/* select the drive */
6303 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6304 
	/* make certain the drive is selected */
6306 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6307 	    NV_SEC2USEC(5), 0) == B_FALSE) {
6308 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6309 		    "nv_start_rqsense_pio: drive select failed", NULL);
6310 		return (NV_FAILURE);
6311 	}
6312 
6313 	/* set up the command */
6314 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6315 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6316 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6317 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6318 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6319 
6320 	/* initiate the command by writing the command register last */
6321 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6322 
6323 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6324 	NV_DELAY_NSEC(400);
6325 
6326 	/*
6327 	 * Wait for the device to indicate that it is ready for the command
6328 	 * ATAPI protocol state - HP0: Check_Status_A
6329 	 */
6330 
6331 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6332 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6333 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6334 	    4000000, 0) == B_FALSE) {
6335 		if (nv_get8(cmdhdl, nvp->nvp_status) &
6336 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6337 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
6338 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6339 			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
6340 			    NULL);
6341 		} else {
6342 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
6343 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6344 			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
6345 			    NULL);
6346 		}
6347 
6348 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6349 		nv_complete_io(nvp, spkt, 0);
6350 		nv_reset(nvp, "rqsense_pio");
6351 
6352 		return (NV_FAILURE);
6353 	}
6354 
6355 	/*
6356 	 * Put the ATAPI command in the data register
6357 	 * ATAPI protocol state - HP1: Send_Packet
6358 	 */
6359 
6360 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6361 	    (ushort_t *)nvp->nvp_data,
6362 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6363 
6364 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6365 	    "nv_start_rqsense_pio: exiting into HP3", NULL);
6366 
6367 	return (NV_SUCCESS);
6368 }
6369 
6370 /*
6371  * quiesce(9E) entry point.
6372  *
6373  * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
6376  *
6377  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6378  * DDI_FAILURE indicates an error condition and should almost never happen.
6379  */
6380 static int
6381 nv_quiesce(dev_info_t *dip)
6382 {
6383 	int port, instance = ddi_get_instance(dip);
6384 	nv_ctl_t *nvc;
6385 
6386 	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6387 		return (DDI_FAILURE);
6388 
6389 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6390 		nv_port_t *nvp = &(nvc->nvc_port[port]);
6391 		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6392 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6393 		uint32_t sctrl;
6394 
6395 		/*
6396 		 * Stop the controllers from generating interrupts.
6397 		 */
6398 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6399 
6400 		/*
6401 		 * clear signature registers
6402 		 */
6403 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6404 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6405 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6406 		nv_put8(cmdhdl, nvp->nvp_count, 0);
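
		/* mark the port as being reset with no known device */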
6407 
6408 		nvp->nvp_signature = NV_NO_SIG;
6409 		nvp->nvp_type = SATA_DTYPE_NONE;
6410 		nvp->nvp_state |= NV_RESET;
6411 		nvp->nvp_reset_time = ddi_get_lbolt();
6412 
6413 		/*
6414 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
6415 		 */
6416 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6417 
6418 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6419 		    sctrl | SCONTROL_DET_COMRESET);
6420 
6421 		/*
6422 		 * wait 1ms
6423 		 */
6424 		drv_usecwait(1000);
6425 
6426 		/*
6427 		 * de-assert reset in PHY
6428 		 */
6429 		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6430 	}
6431 
6432 	return (DDI_SUCCESS);
6433 }
6434 
6435 
6436 #ifdef SGPIO_SUPPORT
6437 /*
6438  * NVIDIA specific SGPIO LED support
6439  * Please refer to the NVIDIA documentation for additional details
6440  */
6441 
6442 /*
6443  * nv_sgp_led_init
6444  * Detect SGPIO support.  If present, initialize.
6445  */
6446 static void
6447 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6448 {
6449 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6450 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6451 	nv_sgp_cmn_t *cmn;	/* shared data structure */
6452 	int i;
6453 	char tqname[SGPIO_TQ_NAME_LEN];
6454 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6455 
6456 	/*
6457 	 * Initialize with appropriately invalid values in case this function
6458 	 * exits without initializing SGPIO (for example, there is no SGPIO
6459 	 * support).
6460 	 */
6461 	nvc->nvc_sgp_csr = 0;
6462 	nvc->nvc_sgp_cbp = NULL;
6463 	nvc->nvc_sgp_cmn = NULL;
6464 
6465 	/*
6466 	 * Only try to initialize SGPIO LED support if this property
6467 	 * indicates it should be.
6468 	 */
6469 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6470 	    "enable-sgpio-leds", 0) != 1)
6471 		return;
6472 
6473 	/*
6474 	 * CK804 can pass the sgpio_detect test even though it does not support
6475 	 * SGPIO, so don't even look at a CK804.
6476 	 */
6477 	if (nvc->nvc_mcp5x_flag != B_TRUE)
6478 		return;
6479 
6480 	/*
6481 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6482 	 * However, the current implementation only supports 4 drives.
	 * With two drives per controller, that means only the first two
	 * controllers are considered.
6485 	 */
6486 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6487 		return;
6488 
6489 	/* confirm that the SGPIO registers are there */
6490 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6491 		NVLOG(NVDBG_INIT, nvc, NULL,
6492 		    "SGPIO registers not detected", NULL);
6493 		return;
6494 	}
6495 
6496 	/* save off the SGPIO_CSR I/O address */
6497 	nvc->nvc_sgp_csr = csrp;
6498 
6499 	/* map in Control Block */
6500 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6501 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6502 
6503 	/* initialize the SGPIO h/w */
6504 	if (nv_sgp_init(nvc) == NV_FAILURE) {
6505 		nv_cmn_err(CE_WARN, nvc, NULL,
6506 		    "Unable to initialize SGPIO");
6507 	}
6508 
6509 	/*
6510 	 * Initialize the shared space for this instance.  This could
6511 	 * involve allocating the space, saving a pointer to the space
6512 	 * and starting the taskq that actually turns the LEDs on and off.
6513 	 * Or, it could involve just getting the pointer to the already
6514 	 * allocated space.
6515 	 */
6516 
6517 	mutex_enter(&nv_sgp_c2c_mutex);
6518 
6519 	/* try and find our CBP in the mapping table */
6520 	cmn = NULL;
6521 	for (i = 0; i < NV_MAX_CBPS; i++) {
6522 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6523 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6524 			break;
6525 		}
6526 
6527 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6528 			break;
6529 	}
6530 
6531 	if (i >= NV_MAX_CBPS) {
6532 		/*
6533 		 * CBP to shared space mapping table is full
6534 		 */
6535 		nvc->nvc_sgp_cmn = NULL;
6536 		nv_cmn_err(CE_WARN, nvc, NULL,
6537 		    "LED handling not initialized - too many controllers");
6538 	} else if (cmn == NULL) {
6539 		/*
6540 		 * Allocate the shared space, point the SGPIO scratch register
6541 		 * at it and start the led update taskq.
6542 		 */
6543 
6544 		/* allocate shared space */
6545 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6546 		    KM_SLEEP);
6547 		if (cmn == NULL) {
6548 			nv_cmn_err(CE_WARN, nvc, NULL,
6549 			    "Failed to allocate shared data");
6550 			return;
6551 		}
6552 
6553 		nvc->nvc_sgp_cmn = cmn;
6554 
6555 		/* initialize the shared data structure */
6556 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6557 		cmn->nvs_connected = 0;
6558 		cmn->nvs_activity = 0;
6559 		cmn->nvs_cbp = cbp;
6560 
6561 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6562 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6563 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6564 
6565 		/* put the address in the SGPIO scratch register */
6566 #if defined(__amd64)
6567 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6568 #else
6569 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6570 #endif
6571 
6572 		/* add an entry to the cbp to cmn mapping table */
6573 
6574 		/* i should be the next available table position */
6575 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6576 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6577 
6578 		/* start the activity LED taskq */
6579 
6580 		/*
6581 		 * The taskq name should be unique and the time
6582 		 */
6583 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6584 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6585 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6586 		    TASKQ_DEFAULTPRI, 0);
6587 		if (cmn->nvs_taskq == NULL) {
6588 			cmn->nvs_taskq_delay = 0;
6589 			nv_cmn_err(CE_WARN, nvc, NULL,
6590 			    "Failed to start activity LED taskq");
6591 		} else {
6592 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6593 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6594 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6595 		}
6596 	} else {
6597 		nvc->nvc_sgp_cmn = cmn;
6598 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6599 	}
6600 
6601 	mutex_exit(&nv_sgp_c2c_mutex);
6602 }
6603 
6604 /*
6605  * nv_sgp_detect
 * Read the SGPIO_CSRP and SGPIO_CBP values from PCI config space and
 * report back whether both appear to be valid (non-zero).
6608  */
6609 static int
6610 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6611     uint32_t *cbpp)
6612 {
6613 	/* get the SGPIO_CSRP */
6614 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6615 	if (*csrpp == 0) {
6616 		return (NV_FAILURE);
6617 	}
6618 
6619 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6620 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6621 	if (*cbpp == 0) {
6622 		return (NV_FAILURE);
6623 	}
6624 
6625 	/* SGPIO_CBP is good, so we must support SGPIO */
6626 	return (NV_SUCCESS);
6627 }
6628 
6629 /*
6630  * nv_sgp_init
6631  * Initialize SGPIO.
6632  * The initialization process is described by NVIDIA, but the hardware does
6633  * not always behave as documented, so several steps have been changed and/or
6634  * omitted.
6635  */
6636 static int
6637 nv_sgp_init(nv_ctl_t *nvc)
6638 {
6639 	int seq;
6640 	int rval = NV_SUCCESS;
6641 	hrtime_t start, end;
6642 	uint32_t cmd;
6643 	uint32_t status;
6644 	int drive_count;
6645 
6646 	status = nv_sgp_csr_read(nvc);
6647 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6648 		/* SGPIO logic is in reset state and requires initialization */
6649 
6650 		/* noting the Sequence field value */
6651 		seq = SGPIO_CSR_SEQ(status);
6652 
6653 		/* issue SGPIO_CMD_READ_PARAMS command */
6654 		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6655 		nv_sgp_csr_write(nvc, cmd);
6656 
6657 		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6658 
6659 		/* poll for command completion */
6660 		start = gethrtime();
6661 		end = start + NV_SGP_CMD_TIMEOUT;
6662 		for (;;) {
6663 			status = nv_sgp_csr_read(nvc);
6664 
6665 			/* break on error */
6666 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6667 				NVLOG(NVDBG_VERBOSE, nvc, NULL,
6668 				    "Command error during initialization",
6669 				    NULL);
6670 				rval = NV_FAILURE;
6671 				break;
6672 			}
6673 
6674 			/* command processing is taking place */
6675 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6676 				if (SGPIO_CSR_SEQ(status) != seq) {
6677 					NVLOG(NVDBG_VERBOSE, nvc, NULL,
6678 					    "Sequence number change error",
6679 					    NULL);
6680 				}
6681 
6682 				break;
6683 			}
6684 
			/* if completion is not detected within 2000 ms, give up */
6686 
6687 			if (gethrtime() > end)
6688 				break;
6689 
6690 			/* wait 400 ns before checking again */
6691 			NV_DELAY_NSEC(400);
6692 		}
6693 	}
6694 
6695 	if (rval == NV_FAILURE)
6696 		return (rval);
6697 
6698 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6699 		NVLOG(NVDBG_VERBOSE, nvc, NULL,
6700 		    "SGPIO logic not operational after init - state %d",
6701 		    SGPIO_CSR_SSTAT(status));
6702 		/*
6703 		 * Should return (NV_FAILURE) but the hardware can be
6704 		 * operational even if the SGPIO Status does not indicate
6705 		 * this.
6706 		 */
6707 	}
6708 
6709 	/*
6710 	 * NVIDIA recommends reading the supported drive count even
6711 	 * though they also indicate that it is always 4 at this time.
6712 	 */
6713 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6714 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6715 		NVLOG(NVDBG_INIT, nvc, NULL,
6716 		    "SGPIO reported undocumented drive count - %d",
6717 		    drive_count);
6718 	}
6719 
6720 	NVLOG(NVDBG_INIT, nvc, NULL,
6721 	    "initialized ctlr: %d csr: 0x%08x",
6722 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);
6723 
6724 	return (rval);
6725 }
6726 
6727 static int
6728 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6729 {
6730 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6731 
6732 	if (cmn == NULL)
6733 		return (NV_FAILURE);
6734 
6735 	mutex_enter(&cmn->nvs_slock);
6736 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6737 	mutex_exit(&cmn->nvs_slock);
6738 
6739 	return (NV_SUCCESS);
6740 }
6741 
6742 /*
6743  * nv_sgp_csr_read
 * This is just a 32-bit I/O port read.  The port number was obtained from
 * the PCI config space.
 *
 * XXX It was advised to use the in[bwl] functions for this, even though
 * they are obsolete interfaces.
6749  */
6750 static int
6751 nv_sgp_csr_read(nv_ctl_t *nvc)
6752 {
6753 	return (inl(nvc->nvc_sgp_csr));
6754 }
6755 
6756 /*
6757  * nv_sgp_csr_write
6758  * This is just a 32-bit I/O port write.  The port number was obtained from
6759  * the PCI config space.
6760  *
 * XXX It was advised to use the out[bwl] functions for this, even though
 * they are obsolete interfaces.
6763  */
6764 static void
6765 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6766 {
6767 	outl(nvc->nvc_sgp_csr, val);
6768 }
6769 
6770 /*
6771  * nv_sgp_write_data
6772  * Cause SGPIO to send Control Block data
6773  */
6774 static int
6775 nv_sgp_write_data(nv_ctl_t *nvc)
6776 {
6777 	hrtime_t start, end;
6778 	uint32_t status;
6779 	uint32_t cmd;
6780 
6781 	/* issue command */
6782 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6783 	nv_sgp_csr_write(nvc, cmd);
6784 
6785 	/* poll for completion */
6786 	start = gethrtime();
6787 	end = start + NV_SGP_CMD_TIMEOUT;
6788 	for (;;) {
6789 		status = nv_sgp_csr_read(nvc);
6790 
6791 		/* break on error completion */
6792 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6793 			break;
6794 
6795 		/* break on successful completion */
6796 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6797 			break;
6798 
6799 		/* Wait 400 ns and try again */
6800 		NV_DELAY_NSEC(400);
6801 
6802 		if (gethrtime() > end)
6803 			break;
6804 	}
6805 
6806 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6807 		return (NV_SUCCESS);
6808 
6809 	return (NV_FAILURE);
6810 }
6811 
6812 /*
6813  * nv_sgp_activity_led_ctl
6814  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6815  * see if any of the activity LEDs need to be changed.
6816  */
6817 static void
6818 nv_sgp_activity_led_ctl(void *arg)
6819 {
6820 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6821 	nv_sgp_cmn_t *cmn;
6822 	volatile nv_sgp_cb_t *cbp;
6823 	clock_t ticks;
6824 	uint8_t drv_leds;
6825 	uint32_t old_leds;
6826 	uint32_t new_led_state;
6827 	int i;
6828 
6829 	cmn = nvc->nvc_sgp_cmn;
6830 	cbp = nvc->nvc_sgp_cbp;
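
	/*
	 * Loop until the taskq delay is cleared, which nv_sgp_cleanup()
	 * does in order to shut this taskq down.
	 */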
6831 
6832 	do {
6833 		/* save off the old state of all of the LEDs */
6834 		old_leds = cbp->sgpio0_tr;
6835 
6836 		DTRACE_PROBE3(sgpio__activity__state,
6837 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6838 		    int, old_leds);
6839 
6840 		new_led_state = 0;
6841 
6842 		/* for each drive */
6843 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6844 
6845 			/* get the current state of the LEDs for the drive */
6846 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6847 
6848 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6849 				/* if not connected, turn off activity */
6850 				drv_leds &= ~TR_ACTIVE_MASK;
6851 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6852 
6853 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6854 				new_led_state |=
6855 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6856 
6857 				continue;
6858 			}
6859 
6860 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6861 				/* connected, but not active */
6862 				drv_leds &= ~TR_ACTIVE_MASK;
6863 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6864 
6865 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6866 				new_led_state |=
6867 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6868 
6869 				continue;
6870 			}
6871 
6872 			/* connected and active */
6873 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6874 				/* was enabled, so disable */
6875 				drv_leds &= ~TR_ACTIVE_MASK;
6876 				drv_leds |=
6877 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6878 
6879 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6880 				new_led_state |=
6881 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6882 			} else {
6883 				/* was disabled, so enable */
6884 				drv_leds &= ~TR_ACTIVE_MASK;
6885 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6886 
6887 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6888 				new_led_state |=
6889 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6890 			}
6891 
6892 			/*
6893 			 * clear the activity bit
6894 			 * if there is drive activity again within the
6895 			 * loop interval (now 1/16 second), nvs_activity
6896 			 * will be reset and the "connected and active"
6897 			 * condition above will cause the LED to blink
6898 			 * off and on at the loop interval rate.  The
6899 			 * rate may be increased (interval shortened) as
6900 			 * long as it is not more than 1/30 second.
6901 			 */
6902 			mutex_enter(&cmn->nvs_slock);
6903 			cmn->nvs_activity &= ~(1 << i);
6904 			mutex_exit(&cmn->nvs_slock);
6905 		}
6906 
6907 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6908 
6909 		/* write out LED values */
6910 
6911 		mutex_enter(&cmn->nvs_slock);
6912 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6913 		cbp->sgpio0_tr |= new_led_state;
6914 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6915 		mutex_exit(&cmn->nvs_slock);
6916 
6917 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6918 			NVLOG(NVDBG_VERBOSE, nvc, NULL,
6919 			    "nv_sgp_write_data failure updating active LED",
6920 			    NULL);
6921 		}
6922 
6923 		/* now rest for the interval */
6924 		mutex_enter(&cmn->nvs_tlock);
6925 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6926 		if (ticks > 0)
6927 			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6928 			    ticks, TR_CLOCK_TICK);
6929 		mutex_exit(&cmn->nvs_tlock);
6930 	} while (ticks > 0);
6931 }
6932 
6933 /*
6934  * nv_sgp_drive_connect
6935  * Set the flag used to indicate that the drive is attached to the HBA.
6936  * Used to let the taskq know that it should turn the Activity LED on.
6937  */
6938 static void
6939 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6940 {
6941 	nv_sgp_cmn_t *cmn;
6942 
6943 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6944 		return;
6945 	cmn = nvc->nvc_sgp_cmn;
6946 
6947 	mutex_enter(&cmn->nvs_slock);
6948 	cmn->nvs_connected |= (1 << drive);
6949 	mutex_exit(&cmn->nvs_slock);
6950 }
6951 
6952 /*
6953  * nv_sgp_drive_disconnect
 * Clear the flag used to indicate that the drive is attached to the HBA
 * (i.e. the drive is no longer attached).  Used to let the taskq know
 * that it should turn the Activity LED off.  The drive's activity flag
 * is also cleared.
6958  */
6959 static void
6960 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6961 {
6962 	nv_sgp_cmn_t *cmn;
6963 
6964 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6965 		return;
6966 	cmn = nvc->nvc_sgp_cmn;
6967 
6968 	mutex_enter(&cmn->nvs_slock);
6969 	cmn->nvs_connected &= ~(1 << drive);
6970 	cmn->nvs_activity &= ~(1 << drive);
6971 	mutex_exit(&cmn->nvs_slock);
6972 }
6973 
6974 /*
6975  * nv_sgp_drive_active
6976  * Sets the flag used to indicate that the drive has been accessed and the
6977  * LED should be flicked off, then on.  It is cleared at a fixed time
6978  * interval by the LED taskq and set by the sata command start.
6979  */
6980 static void
6981 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6982 {
6983 	nv_sgp_cmn_t *cmn;
6984 
6985 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6986 		return;
6987 	cmn = nvc->nvc_sgp_cmn;
6988 
6989 	DTRACE_PROBE1(sgpio__active, int, drive);
6990 
6991 	mutex_enter(&cmn->nvs_slock);
6992 	cmn->nvs_activity |= (1 << drive);
6993 	mutex_exit(&cmn->nvs_slock);
6994 }
6995 
6996 
6997 /*
6998  * nv_sgp_locate
6999  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
7000  * maintained in the SGPIO Control Block.
7001  */
7002 static void
7003 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7004 {
7005 	uint8_t leds;
7006 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7007 	nv_sgp_cmn_t *cmn;
7008 
7009 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7010 		return;
7011 	cmn = nvc->nvc_sgp_cmn;
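
	/* ignore out-of-range drive numbers */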
7012 
7013 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7014 		return;
7015 
7016 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7017 
7018 	mutex_enter(&cmn->nvs_slock);
7019 
7020 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7021 
7022 	leds &= ~TR_LOCATE_MASK;
7023 	leds |= TR_LOCATE_SET(value);
7024 
7025 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7026 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7027 
7028 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7029 
7030 	mutex_exit(&cmn->nvs_slock);
7031 
7032 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7033 		nv_cmn_err(CE_WARN, nvc, NULL,
7034 		    "nv_sgp_write_data failure updating OK2RM/Locate LED");
7035 	}
7036 }
7037 
7038 /*
7039  * nv_sgp_error
7040  * Turns the Error/Failure LED off or on for a particular drive.  State is
7041  * maintained in the SGPIO Control Block.
7042  */
7043 static void
7044 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7045 {
7046 	uint8_t leds;
7047 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7048 	nv_sgp_cmn_t *cmn;
7049 
7050 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7051 		return;
7052 	cmn = nvc->nvc_sgp_cmn;
7053 
7054 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7055 		return;
7056 
7057 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7058 
7059 	mutex_enter(&cmn->nvs_slock);
7060 
7061 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7062 
7063 	leds &= ~TR_ERROR_MASK;
7064 	leds |= TR_ERROR_SET(value);
7065 
7066 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7067 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7068 
7069 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7070 
7071 	mutex_exit(&cmn->nvs_slock);
7072 
7073 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7074 		nv_cmn_err(CE_WARN, nvc, NULL,
7075 		    "nv_sgp_write_data failure updating Fail/Error LED");
7076 	}
7077 }
7078 
7079 static void
7080 nv_sgp_cleanup(nv_ctl_t *nvc)
7081 {
7082 	int drive, i;
7083 	uint8_t drv_leds;
7084 	uint32_t led_state;
7085 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7086 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7087 	extern void psm_unmap_phys(caddr_t, size_t);
7088 
7089 	/*
7090 	 * If the SGPIO Control Block isn't mapped or the shared data
7091 	 * structure isn't present in this instance, there isn't much that
7092 	 * can be cleaned up.
7093 	 */
7094 	if ((cb == NULL) || (cmn == NULL))
7095 		return;
7096 
7097 	/* turn off activity LEDs for this controller */
7098 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7099 
7100 	/* get the existing LED state */
7101 	led_state = cb->sgpio0_tr;
7102 
7103 	/* turn off port 0 */
7104 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7105 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7106 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7107 
7108 	/* turn off port 1 */
7109 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7110 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7111 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7112 
7113 	/* set the new led state, which should turn off this ctrl's LEDs */
7114 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7115 	(void) nv_sgp_write_data(nvc);
7116 
7117 	/* clear the controller's in use bit */
7118 	mutex_enter(&cmn->nvs_slock);
7119 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7120 	mutex_exit(&cmn->nvs_slock);
7121 
7122 	if (cmn->nvs_in_use == 0) {
7123 		/* if all "in use" bits cleared, take everything down */
7124 
7125 		if (cmn->nvs_taskq != NULL) {
7126 			/* allow activity taskq to exit */
7127 			cmn->nvs_taskq_delay = 0;
7128 			cv_broadcast(&cmn->nvs_cv);
7129 
7130 			/* then destroy it */
7131 			ddi_taskq_destroy(cmn->nvs_taskq);
7132 		}
7133 
7134 		/* turn off all of the LEDs */
7135 		cb->sgpio0_tr = 0;
7136 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7137 		(void) nv_sgp_write_data(nvc);
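
		/*
		 * Clear the scratch register that pointed at the shared
		 * data before that data is freed.
		 */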
7138 
7139 		cb->sgpio_sr = NULL;
7140 
7141 		/* zero out the CBP to cmn mapping */
7142 		for (i = 0; i < NV_MAX_CBPS; i++) {
7143 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7144 				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7145 				break;
7146 			}
7147 
7148 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7149 				break;
7150 		}
7151 
7152 		/* free resources */
7153 		cv_destroy(&cmn->nvs_cv);
7154 		mutex_destroy(&cmn->nvs_tlock);
7155 		mutex_destroy(&cmn->nvs_slock);
7156 
7157 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7158 	}
7159 
7160 	nvc->nvc_sgp_cmn = NULL;
7161 
7162 	/* unmap the SGPIO Control Block */
7163 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7164 }
7165 #endif	/* SGPIO_SUPPORT */
7166