1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *
29  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30  * based chipsets.
31  *
32  * NCQ
33  * ---
34  *
35  * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
36  * and is likely to be revisited in the future.
37  *
38  *
39  * Power Management
40  * ----------------
41  *
42  * Normally power management would be responsible for ensuring the device
43  * is quiescent and then changing power states to the device, such as
44  * powering down parts or all of the device.  mcp5x/ck804 is unique in
45  * that it is only available as part of a larger southbridge chipset, so
46  * removing power to the device isn't possible.  Switches to control
47  * power management states D0/D3 in the PCI configuration space appear to
48  * be supported but changes to these states are apparently are ignored.
49  * The only further PM that the driver _could_ do is shut down the PHY,
50  * but in order to deliver the first rev of the driver sooner than later,
51  * that will be deferred until some future phase.
52  *
53  * Since the driver currently will not directly change any power state to
54  * the device, no power() entry point will be required.  However, it is
55  * possible that in ACPI power state S3, aka suspend to RAM, that power
56  * can be removed to the device, and the driver cannot rely on BIOS to
57  * have reset any state.  For the time being, there is no known
58  * non-default configurations that need to be programmed.  This judgement
59  * is based on the port of the legacy ata driver not having any such
60  * functionality and based on conversations with the PM team.  If such a
61  * restoration is later deemed necessary it can be incorporated into the
62  * DDI_RESUME processing.
63  *
64  */
65 
66 #include <sys/scsi/scsi.h>
67 #include <sys/pci.h>
68 #include <sys/byteorder.h>
69 #include <sys/sunddi.h>
70 #include <sys/sata/sata_hba.h>
71 #ifdef SGPIO_SUPPORT
72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73 #include <sys/devctl.h>
74 #include <sys/sdt.h>
75 #endif
76 #include <sys/sata/adapters/nv_sata/nv_sata.h>
77 #include <sys/disp.h>
78 #include <sys/note.h>
79 #include <sys/promif.h>
80 
81 
82 /*
83  * Function prototypes for driver entry points
84  */
static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);	/* DDI_ATTACH/DDI_RESUME */
static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);	/* DDI_DETACH/DDI_SUSPEND */
static int nv_quiesce(dev_info_t *dip);
static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
90 
91 /*
92  * Function prototypes for entry points from sata service module
93  * These functions are distinguished from other local functions
94  * by the prefix "nv_sata_"
95  */
static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
/* hotplug entry points, published via nv_hotplug_ops below */
static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101 
102 /*
103  * Local function prototypes
104  */
/* interrupt handlers and interrupt registration/removal */
static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
static int nv_add_legacy_intrs(nv_ctl_t *nvc);
#ifdef NV_MSI_SUPPORTED
static int nv_add_msi_intrs(nv_ctl_t *nvc);
#endif
static void nv_rem_intrs(nv_ctl_t *nvc);
/* command start and completion paths, one pair per protocol */
static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
static int nv_start_nodata(nv_port_t *nvp, int slot);
static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
static int nv_start_pio_in(nv_port_t *nvp, int slot);
static int nv_start_pio_out(nv_port_t *nvp, int slot);
static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
static int nv_start_dma(nv_port_t *nvp, int slot);
static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
/* logging */
static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
/* controller/port init and teardown */
static void nv_uninit_ctl(nv_ctl_t *nvc);
static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
static void nv_uninit_port(nv_port_t *nvp);
static int nv_init_port(nv_port_t *nvp);
static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
#ifdef NCQ
static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
#endif
static void nv_start_dma_engine(nv_port_t *nvp, int slot);
static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
    int state);
static boolean_t nv_check_link(uint32_t sstatus);
static void nv_common_reg_init(nv_ctl_t *nvc);
static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
static void nv_reset(nv_port_t *nvp);
static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
static void nv_timeout(void *);
static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
static void nv_read_signature(nv_port_t *nvp);
/* per-chip interrupt enable/disable, suspend/resume helpers */
static void mcp5x_set_intr(nv_port_t *nvp, int flag);
static void ck804_set_intr(nv_port_t *nvp, int flag);
static void nv_resume(nv_port_t *nvp);
static void nv_suspend(nv_port_t *nvp);
static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
    sata_pkt_t *spkt);
static void nv_report_add_remove(nv_port_t *nvp, int flags);
static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
/* register polling helpers: wait for on/off bit patterns with timeout */
static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
    uchar_t failure_onbits2, uchar_t failure_offbits2,
    uchar_t failure_onbits3, uchar_t failure_offbits3,
    uint_t timeout_usec, int type_wait);
static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
    uint_t timeout_usec, int type_wait);
static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163 
#ifdef SGPIO_SUPPORT
/* character device entry points used only for the SGPIO LED ioctls */
static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp);

/* SGPIO (Serial GPIO) LED control helpers */
static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
    uint32_t *cbpp);
static int nv_sgp_init(nv_ctl_t *nvc);
static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
static int nv_sgp_csr_read(nv_ctl_t *nvc);
static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
static int nv_sgp_write_data(nv_ctl_t *nvc);
static void nv_sgp_activity_led_ctl(void *arg);
static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
static void nv_sgp_cleanup(nv_ctl_t *nvc);
#endif
186 
187 
188 /*
189  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
190  * Verify if needed if ported to other ISA.
191  */
static ddi_dma_attr_t buffer_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo: lowest bus address */
	0xffffffffull,		/* dma_attr_addr_hi: 32-bit addressing */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes. */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
	0xffffffffull,		/* dma_attr_seg */
	NV_DMA_NSEGS,		/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
/*
 * Identical to buffer_dma_attr except dma_attr_addr_hi allows 40-bit
 * addressing.  Selected only when nv_sata_40bit_dma is enabled.
 */
static ddi_dma_attr_t buffer_dma_40bit_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo: lowest bus address */
	0xffffffffffull,	/* dma_attr_addr_hi: 40-bit addressing */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes. */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
	0xffffffffull,		/* dma_attr_seg */
	NV_DMA_NSEGS,		/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
220 
221 
222 /*
223  * DMA attributes for PRD tables
224  */
ddi_dma_attr_t nv_prd_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi: PRD tables are 32-bit */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg: table must not cross 64K */
	1,			/* dma_attr_sgllen: single cookie only */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
239 
240 /*
241  * Device access attributes
242  */
243 static ddi_device_acc_attr_t accattr = {
244     DDI_DEVICE_ATTR_V0,
245     DDI_STRUCTURE_LE_ACC,
246     DDI_STRICTORDER_ACC
247 };
248 
249 
#ifdef SGPIO_SUPPORT
/*
 * Character device ops; only open/close/ioctl are implemented, and only
 * to service the SGPIO LED control ioctls (see nv_ioctl()).
 */
static struct cb_ops nv_cb_ops = {
	nv_open,		/* open */
	nv_close,		/* close */
	nodev,			/* strategy (block) */
	nodev,			/* print (block) */
	nodev,			/* dump (block) */
	nodev,			/* read */
	nodev,			/* write */
	nv_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streams */
	D_NEW | D_MP |
	D_64BIT | D_HOTPLUG,	/* flags */
	CB_REV			/* rev */
};
#endif  /* SGPIO_SUPPORT */
271 
272 
/* Device operations vector registered with the DDI framework. */
static struct dev_ops nv_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	nv_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	nv_attach,		/* attach */
	nv_detach,		/* detach */
	nodev,			/* no reset */
#ifdef SGPIO_SUPPORT
	&nv_cb_ops,		/* driver operations */
#else
	(struct cb_ops *)0,	/* driver operations */
#endif
	NULL,			/* bus operations */
	NULL,			/* power */
	nv_quiesce		/* quiesce */
};
291 
292 
293 /*
294  * Request Sense CDB for ATAPI
295  */
static const uint8_t nv_rqsense_cdb[16] = {
	SCMD_REQUEST_SENSE,
	0,
	0,
	0,
	SATA_ATAPI_MIN_RQSENSE_LEN,	/* allocation length */
	0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
};
305 
306 
/* tentative declaration; initialized below after the entry points exist */
static sata_tran_hotplug_ops_t nv_hotplug_ops;

extern struct mod_ops mod_driverops;

static  struct modldrv modldrv = {
	&mod_driverops,	/* driverops */
	"Nvidia ck804/mcp51/mcp55 HBA",
	&nv_dev_ops,	/* driver ops */
};

static  struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
322 
323 
324 /*
325  * wait between checks of reg status
326  */
/* microseconds between polls in nv_wait()/nv_wait3(); tunable */
int nv_usec_delay = NV_WAIT_REG_CHECK;

/*
 * The following is needed for nv_vcmn_err()
 */
static kmutex_t nv_log_mutex; /* protects nv_log_buf */
static char nv_log_buf[NV_STRING_512];
int nv_debug_flags = NVDBG_ALWAYS;	/* NVLOG message category mask */
int nv_log_to_console = B_FALSE;	/* mirror log messages to console */

int nv_log_delay = 0;		/* optional delay after each log message */
int nv_prom_print = B_FALSE;	/* route messages through prom_printf() */

/*
 * for debugging
 */
#ifdef DEBUG
int ncq_commands = 0;
int non_ncq_commands = 0;
#endif

/*
 * Opaque state pointer to be initialized by ddi_soft_state_init()
 */
static void *nv_statep	= NULL;
352 
353 /*
354  * Map from CBP to shared space
355  *
356  * When a MCP55/IO55 parts supports SGPIO, there is a single CBP (SGPIO
357  * Control Block Pointer as well as the corresponding Control Block) that
358  * is shared across all driver instances associated with that part.  The
359  * Control Block is used to update and query the LED state for the devices
360  * on the controllers associated with those instances.  There is also some
361  * driver state (called the 'common' area here) associated with each SGPIO
362  * Control Block.  The nv_sgp_cpb2cmn is used to map a given CBP to its
363  * control area.
364  *
365  * The driver can also use this mapping array to determine whether the
366  * common area for a given CBP has been initialized, and, if it isn't
367  * initialized, initialize it.
368  *
369  * When a driver instance with a CBP value that is already in the array is
370  * initialized, it will use the pointer to the previously initialized common
371  * area associated with that SGPIO CBP value, rather than initialize it
372  * itself.
373  *
374  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
375  */
#ifdef SGPIO_SUPPORT
static kmutex_t nv_sgp_c2c_mutex;	/* protects nv_sgp_cbp2cmn[] */
static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
#endif

/* We still have problems in 40-bit DMA support, so disable it by default */
int nv_sata_40bit_dma = B_FALSE;

/* hotplug vector handed to the sata module; see the tentative decl above */
static sata_tran_hotplug_ops_t nv_hotplug_ops = {
	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
	nv_sata_activate,	/* activate port. cfgadm -c connect */
	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
};
389 
390 
391 /*
392  *  nv module initialization
393  */
394 int
395 _init(void)
396 {
397 	int	error;
398 #ifdef SGPIO_SUPPORT
399 	int	i;
400 #endif
401 
402 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
403 
404 	if (error != 0) {
405 
406 		return (error);
407 	}
408 
409 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
410 #ifdef SGPIO_SUPPORT
411 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
412 
413 	for (i = 0; i < NV_MAX_CBPS; i++) {
414 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
415 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
416 	}
417 #endif
418 
419 	if ((error = sata_hba_init(&modlinkage)) != 0) {
420 		ddi_soft_state_fini(&nv_statep);
421 		mutex_destroy(&nv_log_mutex);
422 
423 		return (error);
424 	}
425 
426 	error = mod_install(&modlinkage);
427 	if (error != 0) {
428 		sata_hba_fini(&modlinkage);
429 		ddi_soft_state_fini(&nv_statep);
430 		mutex_destroy(&nv_log_mutex);
431 
432 		return (error);
433 	}
434 
435 	return (error);
436 }
437 
438 
439 /*
440  * nv module uninitialize
441  */
442 int
443 _fini(void)
444 {
445 	int	error;
446 
447 	error = mod_remove(&modlinkage);
448 
449 	if (error != 0) {
450 		return (error);
451 	}
452 
453 	/*
454 	 * remove the resources allocated in _init()
455 	 */
456 	mutex_destroy(&nv_log_mutex);
457 #ifdef SGPIO_SUPPORT
458 	mutex_destroy(&nv_sgp_c2c_mutex);
459 #endif
460 	sata_hba_fini(&modlinkage);
461 	ddi_soft_state_fini(&nv_statep);
462 
463 	return (error);
464 }
465 
466 
467 /*
468  * nv _info entry point
469  */
int
_info(struct modinfo *modinfop)
{
	/* report module name/version via the standard linkage structure */
	return (mod_info(&modlinkage, modinfop));
}
475 
476 
477 /*
478  * these wrappers for ddi_{get,put}8 are for observability
479  * with dtrace
480  */
#ifdef DEBUG

/*
 * In DEBUG builds these are real functions so dtrace fbt probes can
 * observe every register access; in non-DEBUG builds they collapse to
 * the raw ddi_{get,put}{8,16,32} routines via the #defines below.
 */
static void
nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
{
	ddi_put8(handle, dev_addr, value);
}

static void
nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
{
	ddi_put32(handle, dev_addr, value);
}

static uint32_t
nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
{
	return (ddi_get32(handle, dev_addr));
}

static void
nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
{
	ddi_put16(handle, dev_addr, value);
}

static uint16_t
nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
{
	return (ddi_get16(handle, dev_addr));
}

static uint8_t
nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
{
	return (ddi_get8(handle, dev_addr));
}

#else

#define	nv_put8 ddi_put8
#define	nv_put32 ddi_put32
#define	nv_get32 ddi_get32
#define	nv_put16 ddi_put16
#define	nv_get16 ddi_get16
#define	nv_get8 ddi_get8

#endif
529 
530 
531 /*
532  * Driver attach
533  */
534 static int
535 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
536 {
537 	int status, attach_state, intr_types, bar, i, command;
538 	int inst = ddi_get_instance(dip);
539 	ddi_acc_handle_t pci_conf_handle;
540 	nv_ctl_t *nvc;
541 	uint8_t subclass;
542 	uint32_t reg32;
543 #ifdef SGPIO_SUPPORT
544 	pci_regspec_t *regs;
545 	int rlen;
546 #endif
547 
548 	switch (cmd) {
549 
550 	case DDI_ATTACH:
551 
552 		NVLOG((NVDBG_INIT, NULL, NULL,
553 		    "nv_attach(): DDI_ATTACH inst %d", inst));
554 
555 		attach_state = ATTACH_PROGRESS_NONE;
556 
557 		status = ddi_soft_state_zalloc(nv_statep, inst);
558 
559 		if (status != DDI_SUCCESS) {
560 			break;
561 		}
562 
563 		nvc = ddi_get_soft_state(nv_statep, inst);
564 
565 		nvc->nvc_dip = dip;
566 
567 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
568 
569 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
570 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
571 			    PCI_CONF_REVID);
572 			NVLOG((NVDBG_INIT, NULL, NULL,
573 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
574 			    inst, nvc->nvc_revid, nv_debug_flags));
575 		} else {
576 			break;
577 		}
578 
579 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
580 
581 		/*
582 		 * Set the PCI command register: enable IO/MEM/Master.
583 		 */
584 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
585 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
586 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
587 
588 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
589 
590 		if (subclass & PCI_MASS_RAID) {
591 			cmn_err(CE_WARN,
592 			    "attach failed: RAID mode not supported");
593 			break;
594 		}
595 
596 		/*
597 		 * the 6 bars of the controller are:
598 		 * 0: port 0 task file
599 		 * 1: port 0 status
600 		 * 2: port 1 task file
601 		 * 3: port 1 status
602 		 * 4: bus master for both ports
603 		 * 5: extended registers for SATA features
604 		 */
605 		for (bar = 0; bar < 6; bar++) {
606 			status = ddi_regs_map_setup(dip, bar + 1,
607 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
608 			    &nvc->nvc_bar_hdl[bar]);
609 
610 			if (status != DDI_SUCCESS) {
611 				NVLOG((NVDBG_INIT, nvc, NULL,
612 				    "ddi_regs_map_setup failure for bar"
613 				    " %d status = %d", bar, status));
614 				break;
615 			}
616 		}
617 
618 		attach_state |= ATTACH_PROGRESS_BARS;
619 
620 		/*
621 		 * initialize controller and driver core
622 		 */
623 		status = nv_init_ctl(nvc, pci_conf_handle);
624 
625 		if (status == NV_FAILURE) {
626 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
627 
628 			break;
629 		}
630 
631 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
632 
633 		/*
634 		 * initialize mutexes
635 		 */
636 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
637 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
638 
639 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
640 
641 		/*
642 		 * get supported interrupt types
643 		 */
644 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
645 		    DDI_SUCCESS) {
646 			nv_cmn_err(CE_WARN, nvc, NULL,
647 			    "!ddi_intr_get_supported_types failed");
648 			NVLOG((NVDBG_INIT, nvc, NULL,
649 			    "interrupt supported types failed"));
650 
651 			break;
652 		}
653 
654 		NVLOG((NVDBG_INIT, nvc, NULL,
655 		    "ddi_intr_get_supported_types() returned: 0x%x",
656 		    intr_types));
657 
658 #ifdef NV_MSI_SUPPORTED
659 		if (intr_types & DDI_INTR_TYPE_MSI) {
660 			NVLOG((NVDBG_INIT, nvc, NULL,
661 			    "using MSI interrupt type"));
662 
663 			/*
664 			 * Try MSI first, but fall back to legacy if MSI
665 			 * attach fails
666 			 */
667 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
668 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
669 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
670 				NVLOG((NVDBG_INIT, nvc, NULL,
671 				    "MSI interrupt setup done"));
672 			} else {
673 				nv_cmn_err(CE_CONT, nvc, NULL,
674 				    "!MSI registration failed "
675 				    "will try Legacy interrupts");
676 			}
677 		}
678 #endif
679 
680 		/*
681 		 * Either the MSI interrupt setup has failed or only
682 		 * the fixed interrupts are available on the system.
683 		 */
684 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
685 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
686 
687 			NVLOG((NVDBG_INIT, nvc, NULL,
688 			    "using Legacy interrupt type"));
689 
690 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
691 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
692 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
693 				NVLOG((NVDBG_INIT, nvc, NULL,
694 				    "Legacy interrupt setup done"));
695 			} else {
696 				nv_cmn_err(CE_WARN, nvc, NULL,
697 				    "!legacy interrupt setup failed");
698 				NVLOG((NVDBG_INIT, nvc, NULL,
699 				    "legacy interrupt setup failed"));
700 				break;
701 			}
702 		}
703 
704 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
705 			NVLOG((NVDBG_INIT, nvc, NULL,
706 			    "no interrupts registered"));
707 			break;
708 		}
709 
710 #ifdef SGPIO_SUPPORT
711 		/*
712 		 * save off the controller number
713 		 */
714 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
715 		    "reg", (caddr_t)&regs, &rlen);
716 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
717 		kmem_free(regs, rlen);
718 
719 		/*
720 		 * initialize SGPIO
721 		 */
722 		nv_sgp_led_init(nvc, pci_conf_handle);
723 #endif	/* SGPIO_SUPPORT */
724 
725 		/*
726 		 * attach to sata module
727 		 */
728 		if (sata_hba_attach(nvc->nvc_dip,
729 		    &nvc->nvc_sata_hba_tran,
730 		    DDI_ATTACH) != DDI_SUCCESS) {
731 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
732 
733 			break;
734 		}
735 
736 		pci_config_teardown(&pci_conf_handle);
737 
738 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
739 
740 		return (DDI_SUCCESS);
741 
742 	case DDI_RESUME:
743 
744 		nvc = ddi_get_soft_state(nv_statep, inst);
745 
746 		NVLOG((NVDBG_INIT, nvc, NULL,
747 		    "nv_attach(): DDI_RESUME inst %d", inst));
748 
749 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
750 			return (DDI_FAILURE);
751 		}
752 
753 		/*
754 		 * Set the PCI command register: enable IO/MEM/Master.
755 		 */
756 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
757 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
758 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
759 
760 		/*
761 		 * Need to set bit 2 to 1 at config offset 0x50
762 		 * to enable access to the bar5 registers.
763 		 */
764 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
765 
766 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
767 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
768 			    reg32 | NV_BAR5_SPACE_EN);
769 		}
770 
771 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
772 
773 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
774 			nv_resume(&(nvc->nvc_port[i]));
775 		}
776 
777 		pci_config_teardown(&pci_conf_handle);
778 
779 		return (DDI_SUCCESS);
780 
781 	default:
782 		return (DDI_FAILURE);
783 	}
784 
785 
786 	/*
787 	 * DDI_ATTACH failure path starts here
788 	 */
789 
790 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
791 		nv_rem_intrs(nvc);
792 	}
793 
794 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
795 		/*
796 		 * Remove timers
797 		 */
798 		int port = 0;
799 		nv_port_t *nvp;
800 
801 		for (; port < NV_MAX_PORTS(nvc); port++) {
802 			nvp = &(nvc->nvc_port[port]);
803 			if (nvp->nvp_timeout_id != 0) {
804 				(void) untimeout(nvp->nvp_timeout_id);
805 			}
806 		}
807 	}
808 
809 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
810 		mutex_destroy(&nvc->nvc_mutex);
811 	}
812 
813 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
814 		nv_uninit_ctl(nvc);
815 	}
816 
817 	if (attach_state & ATTACH_PROGRESS_BARS) {
818 		while (--bar >= 0) {
819 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
820 		}
821 	}
822 
823 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
824 		ddi_soft_state_free(nv_statep, inst);
825 	}
826 
827 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
828 		pci_config_teardown(&pci_conf_handle);
829 	}
830 
831 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
832 
833 	return (DDI_FAILURE);
834 }
835 
836 
/*
 * Driver detach entry point.
 *
 * DDI_DETACH: tear down in the reverse order of nv_attach() —
 * interrupts, port timers, BAR mappings, mutex, controller core,
 * SGPIO resources, sata framework registration, then soft state.
 *
 * DDI_SUSPEND: suspend each port and mark the controller suspended;
 * state is restored by the DDI_RESUME path of nv_attach().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (unknown cmd).
 */
static int
nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i, port, inst = ddi_get_instance(dip);
	nv_ctl_t *nvc;
	nv_port_t *nvp;

	nvc = ddi_get_soft_state(nv_statep, inst);

	switch (cmd) {

	case DDI_DETACH:

		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));

		/*
		 * Remove interrupts
		 */
		nv_rem_intrs(nvc);

		/*
		 * Remove timers
		 */
		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
			nvp = &(nvc->nvc_port[port]);
			if (nvp->nvp_timeout_id != 0) {
				(void) untimeout(nvp->nvp_timeout_id);
			}
		}

		/*
		 * Remove maps
		 */
		for (i = 0; i < 6; i++) {
			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
		}

		/*
		 * Destroy mutexes
		 */
		mutex_destroy(&nvc->nvc_mutex);

		/*
		 * Uninitialize the controller
		 */
		nv_uninit_ctl(nvc);

#ifdef SGPIO_SUPPORT
		/*
		 * release SGPIO resources
		 */
		nv_sgp_cleanup(nvc);
#endif

		/*
		 * unregister from the sata module
		 */
		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);

		/*
		 * Free soft state
		 */
		ddi_soft_state_free(nv_statep, inst);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:

		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));

		/* quiesce each port before flagging the controller */
		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
			nv_suspend(&(nvc->nvc_port[i]));
		}

		nvc->nvc_state |= NV_CTRL_SUSPEND;

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
919 
920 
921 /*ARGSUSED*/
922 static int
923 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
924 {
925 	nv_ctl_t *nvc;
926 	int instance;
927 	dev_t dev;
928 
929 	dev = (dev_t)arg;
930 	instance = getminor(dev);
931 
932 	switch (infocmd) {
933 	case DDI_INFO_DEVT2DEVINFO:
934 		nvc = ddi_get_soft_state(nv_statep,  instance);
935 		if (nvc != NULL) {
936 			*result = nvc->nvc_dip;
937 			return (DDI_SUCCESS);
938 		} else {
939 			*result = NULL;
940 			return (DDI_FAILURE);
941 		}
942 	case DDI_INFO_DEVT2INSTANCE:
943 		*(int *)result = instance;
944 		break;
945 	default:
946 		break;
947 	}
948 	return (DDI_SUCCESS);
949 }
950 
951 
952 #ifdef SGPIO_SUPPORT
953 /* ARGSUSED */
954 static int
955 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
956 {
957 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
958 
959 	if (nvc == NULL) {
960 		return (ENXIO);
961 	}
962 
963 	return (0);
964 }
965 
966 
/* ARGSUSED */
/* Character device close: nothing to release; always succeeds. */
static int
nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	return (0);
}
973 
974 
/* ARGSUSED */
/*
 * SGPIO LED control ioctls.
 *
 * DEVCTL_SET_LED: turn the fail/locate LED for a drive on or off.
 * DEVCTL_GET_LED: report the driver's view of a drive's LED state.
 * DEVCTL_NUM_LEDS: report the number of controllable LEDs.
 *
 * Requires SGPIO to have been detected at attach time (nvc_sgp_cbp and
 * nvc_sgp_cmn non-NULL).  Returns 0 on success or an errno.
 */
static int
nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	nv_ctl_t *nvc;
	int inst;
	int status;
	int ctlr, port;
	int drive;
	uint8_t curr_led;
	struct dc_led_ctl led;

	inst = getminor(dev);
	if (inst == -1) {
		return (EBADF);
	}

	nvc = ddi_get_soft_state(nv_statep, inst);
	if (nvc == NULL) {
		return (EBADF);
	}

	/* no SGPIO control block => LED control is unavailable */
	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
		return (EIO);
	}

	switch (cmd) {
	case DEVCTL_SET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controller currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validate the port value within led_state
		 * as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* blinking is not supported by this hardware (see below) */
		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
			return (EINVAL);
		}

		drive = led.led_number;

		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
		    (led.led_state == DCL_STATE_OFF)) {

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
			} else {
				return (ENXIO);
			}

			/*
			 * NOTE(review): both the OFF and ON branches OR the
			 * led_type into nvp_sgp_ioctl_mod; it looks like the
			 * OFF branch might intend to clear the bit instead —
			 * confirm against the GET_LED logic before changing.
			 */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		if (led.led_ctl_active == DCL_CNTRL_ON) {
			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
			} else {
				return (ENXIO);
			}

			/* remember that this LED is under ioctl control */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		break;

	case DEVCTL_GET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controller currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validate the port value within led_state
		 * as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* read the drive's current tri-state value from the CB */
		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
		    led.led_number);

		port = SGP_DRV_TO_PORT(led.led_number);
		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
			led.led_ctl_active = DCL_CNTRL_ON;

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else {
				return (ENXIO);
			}
		} else {
			led.led_ctl_active = DCL_CNTRL_OFF;
			/*
			 * Not really off, but never set and no constant for
			 * tri-state
			 */
			led.led_state = DCL_STATE_OFF;
		}

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	case DEVCTL_NUM_LEDS:
		led.led_number = SGPIO_DRV_CNT_VALUE;
		led.led_ctl_active = 1;
		led.led_type = 3;

		/*
		 * According to documentation, NVIDIA SGPIO is supposed to
		 * support blinking, but it does not seem to work in practice.
		 */
		led.led_state = DCL_STATE_ON;

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	default:
		return (EINVAL);
	}

	return (0);
}
1133 #endif	/* SGPIO_SUPPORT */
1134 
1135 
1136 /*
1137  * Called by sata module to probe a port.  Port and device state
1138  * are not changed here... only reported back to the sata module.
1139  *
1140  * If probe confirms a device is present for the first time, it will
1141  * initiate a device reset, then probe will be called again and the
1142  * signature will be check.  If the signature is valid, data structures
1143  * will be initialized.
1144  */
static int
nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
{
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	uint8_t cport = sd->satadev_addr.cport;
	uint8_t pmport = sd->satadev_addr.pmport;
	uint8_t qual = sd->satadev_addr.qual;
	clock_t nv_lbolt = ddi_get_lbolt();
	nv_port_t *nvp;

	/*
	 * fail the probe if the port number is out of range for this
	 * controller before indexing into nvc_port[]
	 */
	if (cport >= NV_MAX_PORTS(nvc)) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;

		return (SATA_FAILURE);
	}

	ASSERT(nvc->nvc_port != NULL);
	nvp = &(nvc->nvc_port[cport]);
	ASSERT(nvp != NULL);

	NVLOG((NVDBG_PROBE, nvc, nvp,
	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
	    "qual: 0x%x", cport, pmport, qual));

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * This check seems to be done in the SATA module.
	 * It may not be required here
	 */
	if (nvp->nvp_state & NV_PORT_INACTIVE) {
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port inactive.  Use cfgadm to activate");
		sd->satadev_type = SATA_DTYPE_UNKNOWN;
		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_FAILURE);
	}

	/*
	 * this controller has no port multiplier support, so fail any
	 * probe addressed to a port multiplier device port
	 */
	if (qual == SATA_ADDR_PMPORT) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;
		mutex_exit(&nvp->nvp_mutex);
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "controller does not support port multiplier");

		return (SATA_FAILURE);
	}

	sd->satadev_state = SATA_PSTATE_PWRON;

	/* snapshot port registers (sstatus etc.) into sd for the caller */
	nv_copy_registers(nvp, sd, NULL);

	/*
	 * determine link status
	 */
	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
		uint8_t det;

		/*
		 * Reset will cause the link to go down for a short period of
		 * time.  If link is lost for less than 2 seconds ignore it
		 * so that the reset can progress.
		 */
		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {

			if (nvp->nvp_link_lost_time == 0) {
				nvp->nvp_link_lost_time = nv_lbolt;
			}

			if (TICK_TO_SEC(nv_lbolt -
			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
				    "probe: intermittent link lost while"
				    " resetting"));
				/*
				 * fake status of link so that probe continues
				 */
				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
				    SSTATUS_IPM_ACTIVE);
				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
				    SSTATUS_DET_DEVPRE_PHYCOM);
				sd->satadev_type = SATA_DTYPE_UNKNOWN;
				mutex_exit(&nvp->nvp_mutex);

				return (SATA_SUCCESS);
			} else {
				/*
				 * link has been lost too long; abandon the
				 * in-progress reset
				 */
				nvp->nvp_state &=
				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
			}
		}

		/*
		 * no link, so tear down port and abort all active packets
		 */

		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
		    SSTATUS_DET_SHIFT;

		switch (det) {
		case SSTATUS_DET_NODEV:
		case SSTATUS_DET_PHYOFFLINE:
			sd->satadev_type = SATA_DTYPE_NONE;
			break;
		default:
			sd->satadev_type = SATA_DTYPE_UNKNOWN;
			break;
		}

		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
		    "probe: link lost invoking nv_abort_active"));

		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
		nv_uninit_port(nvp);

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	} else {
		/* link is up; forget any transient link-lost timestamp */
		nvp->nvp_link_lost_time = 0;
	}

	/*
	 * A device is present so clear hotremoved flag
	 */
	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;

#ifdef SGPIO_SUPPORT
	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
#endif

	/*
	 * If the signature was acquired previously there is no need to
	 * do it again.
	 */
	if (nvp->nvp_signature != 0) {
		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
		    "probe: signature acquired previously"));
		sd->satadev_type = nvp->nvp_type;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * If NV_PORT_RESET is not set, this is the first time through
	 * so perform reset and return.
	 */
	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
		    "probe: first reset to get sig"));
		nvp->nvp_state |= NV_PORT_RESET_PROBE;
		nv_reset(nvp);
		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
		nvp->nvp_probe_time = nv_lbolt;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * Reset was done previously.  see if the signature is
	 * available.
	 */
	nv_read_signature(nvp);
	sd->satadev_type = nvp->nvp_type;

	/*
	 * Some drives may require additional resets to get a
	 * valid signature.  If a drive was not just powered up, the signature
	 * should arrive within half a second of reset.  Therefore if more
	 * than 5 seconds has elapsed while waiting for a signature, reset
	 * again.  These extra resets do not appear to create problems when
	 * the drive is spinning up for more than this reset period.
	 */
	if (nvp->nvp_signature == 0) {
		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
			    " during signature acquisition"));
			nv_reset(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));

	/*
	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
	 * it is not either of those, then just return.
	 */
	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
		    " disks/CDs/DVDs.  Signature acquired was %X",
		    nvp->nvp_signature));
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * make sure structures are initialized
	 */
	if (nv_init_port(nvp) == NV_SUCCESS) {
		NVLOG((NVDBG_PROBE, nvc, nvp,
		    "device detected and set up at port %d", cport));
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	} else {
		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
		    "structures for port %d", cport);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_FAILURE);
	}
	/*NOTREACHED*/
}
1370 
1371 
1372 /*
1373  * Called by sata module to start a new command.
1374  */
static int
nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
{
	int cport = spkt->satapkt_device.satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret;

	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * hotremoved is an intermediate state where the link was lost,
	 * but the hotplug event has not yet been processed by the sata
	 * module.  Fail the request.
	 */
	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_PORT_HOTREMOVED"));
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * a reset is still in progress on this port; ask the caller to
	 * retry later
	 */
	if (nvp->nvp_state & NV_PORT_RESET) {
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "still waiting for reset completion"));
		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		/*
		 * If in panic, timeouts do not occur, so fake one
		 * so that the signature can be acquired to complete
		 * the reset handling.
		 */
		if (ddi_in_panic()) {
			nv_timeout(nvp);
		}

		return (SATA_TRAN_BUSY);
	}

	/*
	 * no device attached to this port
	 */
	if (nvp->nvp_type == SATA_DTYPE_NONE) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: SATA_DTYPE_NONE"));
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * this controller has no port multiplier support
	 */
	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port multipliers not supported by controller");
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_CMD_UNSUPPORTED);
	}

	/*
	 * port data structures have not been set up yet (nv_init_port)
	 */
	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: port not yet initialized"));
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * port was deactivated (cfgadm -c disconnect)
	 */
	if (nvp->nvp_state & NV_PORT_INACTIVE) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_PORT_INACTIVE"));
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * port was marked failed by error handling
	 */
	if (nvp->nvp_state & NV_PORT_FAILED) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_PORT_FAILED state"));
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * after a device reset, and then when sata module restore processing
	 * is complete, the sata module will set sata_clear_dev_reset which
	 * indicates that restore processing has completed and normal
	 * non-restore related commands should be processed.
	 */
	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
		nvp->nvp_state &= ~NV_PORT_RESTORE;
		NVLOG((NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_start: clearing NV_PORT_RESTORE"));
	}

	/*
	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
	 * only allow commands which restore device state.  The sata module
	 * marks such commands with with sata_ignore_dev_reset.
	 *
	 * during coredump, nv_reset is called and but then the restore
	 * doesn't happen.  For now, workaround by ignoring the wait for
	 * restore if the system is panicing.
	 */
	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
	    (ddi_in_panic() == 0)) {
		spkt->satapkt_reason = SATA_PKT_BUSY;
		NVLOG((NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_start: waiting for restore "));
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/*
	 * an abort is in progress on this port; don't start new commands
	 */
	if (nvp->nvp_state & NV_PORT_ABORTING) {
		spkt->satapkt_reason = SATA_PKT_BUSY;
		NVLOG((NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_PORT_ABORTING"));
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/*
	 * synchronous (and polled) requests are handled by nv_start_sync,
	 * which is called with nvp_mutex held
	 */
	if (spkt->satapkt_op_mode &
	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {

		ret = nv_start_sync(nvp, spkt);

		mutex_exit(&nvp->nvp_mutex);

		return (ret);
	}

	/*
	 * start command asynchronous command
	 */
	ret = nv_start_async(nvp, spkt);

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1532 
1533 
1534 /*
1535  * SATA_OPMODE_POLLING implies the driver is in a
1536  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1537  * If only SATA_OPMODE_SYNCH is set, the driver can use
1538  * interrupts and sleep wait on a cv.
1539  *
1540  * If SATA_OPMODE_POLLING is set, the driver can't use
1541  * interrupts and must busy wait and simulate the
1542  * interrupts by waiting for BSY to be cleared.
1543  *
1544  * Synchronous mode has to return BUSY if there are
1545  * any other commands already on the drive.
1546  */
1547 static int
1548 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1549 {
1550 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1551 	int ret;
1552 
1553 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1554 
1555 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1556 		spkt->satapkt_reason = SATA_PKT_BUSY;
1557 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1558 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1559 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1560 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1561 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1562 
1563 		return (SATA_TRAN_BUSY);
1564 	}
1565 
1566 	/*
1567 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1568 	 */
1569 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1570 	    servicing_interrupt()) {
1571 		spkt->satapkt_reason = SATA_PKT_BUSY;
1572 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1573 		    "SYNC mode not allowed during interrupt"));
1574 
1575 		return (SATA_TRAN_BUSY);
1576 
1577 	}
1578 
1579 	/*
1580 	 * disable interrupt generation if in polled mode
1581 	 */
1582 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1583 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1584 	}
1585 
1586 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1587 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1588 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1589 		}
1590 
1591 		return (ret);
1592 	}
1593 
1594 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1595 		mutex_exit(&nvp->nvp_mutex);
1596 		ret = nv_poll_wait(nvp, spkt);
1597 		mutex_enter(&nvp->nvp_mutex);
1598 
1599 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1600 
1601 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1602 		    " done % reason %d", ret));
1603 
1604 		return (ret);
1605 	}
1606 
1607 	/*
1608 	 * non-polling synchronous mode handling.  The interrupt will signal
1609 	 * when the IO is completed.
1610 	 */
1611 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1612 
1613 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1614 
1615 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1616 	}
1617 
1618 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1619 	    " done % reason %d", spkt->satapkt_reason));
1620 
1621 	return (SATA_TRAN_ACCEPTED);
1622 }
1623 
1624 
/*
 * Busy-wait for completion of a polled command: repeatedly wait for BSY
 * to clear and then invoke the controller's interrupt handler by hand
 * until the slot is marked complete.  On timeout or an unclaimed
 * "interrupt", the packet is failed with SATA_PKT_TIMEOUT and the port
 * is reset.  Called without nvp_mutex held; always returns
 * SATA_TRAN_ACCEPTED (the outcome is in spkt->satapkt_reason).
 */
static int
nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
{
	int ret;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
#if ! defined(__lock_lint)
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
#endif

	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));

	for (;;) {

		/* brief pause before sampling status */
		NV_DELAY_NSEC(400);

		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
		/*
		 * wait for BSY to clear, up to the packet's timeout.  If it
		 * never clears, fail the packet and reset the port.
		 */
		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
			mutex_enter(&nvp->nvp_mutex);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp);
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);
			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
			    "SATA_STATUS_BSY"));

			return (SATA_TRAN_ACCEPTED);
		}

		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));

		/*
		 * Simulate interrupt.
		 */
		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));

		/*
		 * the handler found no work even though BSY cleared; treat
		 * the command as hung: reset the port and fail the packet
		 */
		if (ret != DDI_INTR_CLAIMED) {
			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
			    " unclaimed -- resetting"));
			mutex_enter(&nvp->nvp_mutex);
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);

			return (SATA_TRAN_ACCEPTED);
		}

#if ! defined(__lock_lint)
		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
			/*
			 * packet is complete
			 */
			return (SATA_TRAN_ACCEPTED);
		}
#endif
	}
	/*NOTREACHED*/
}
1687 
1688 
1689 /*
1690  * Called by sata module to abort outstanding packets.
1691  */
1692 /*ARGSUSED*/
1693 static int
1694 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1695 {
1696 	int cport = spkt->satapkt_device.satadev_addr.cport;
1697 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1698 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1699 	int c_a, ret;
1700 
1701 	ASSERT(cport < NV_MAX_PORTS(nvc));
1702 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1703 
1704 	mutex_enter(&nvp->nvp_mutex);
1705 
1706 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1707 		mutex_exit(&nvp->nvp_mutex);
1708 		nv_cmn_err(CE_WARN, nvc, nvp,
1709 		    "abort request failed: port inactive");
1710 
1711 		return (SATA_FAILURE);
1712 	}
1713 
1714 	/*
1715 	 * spkt == NULL then abort all commands
1716 	 */
1717 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1718 
1719 	if (c_a) {
1720 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1721 		    "packets aborted running=%d", c_a));
1722 		ret = SATA_SUCCESS;
1723 	} else {
1724 		if (spkt == NULL) {
1725 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1726 		} else {
1727 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1728 			    "can't find spkt to abort"));
1729 		}
1730 		ret = SATA_FAILURE;
1731 	}
1732 
1733 	mutex_exit(&nvp->nvp_mutex);
1734 
1735 	return (ret);
1736 }
1737 
1738 
1739 /*
1740  * if spkt == NULL abort all pkts running, otherwise
1741  * abort the requested packet.  must be called with nv_mutex
1742  * held and returns with it held.  Not NCQ aware.
1743  */
1744 static int
1745 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1746 {
1747 	int aborted = 0, i, reset_once = B_FALSE;
1748 	struct nv_slot *nv_slotp;
1749 	sata_pkt_t *spkt_slot;
1750 
1751 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1752 
1753 	/*
1754 	 * return if the port is not configured
1755 	 */
1756 	if (nvp->nvp_slot == NULL) {
1757 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1758 		    "nv_abort_active: not configured so returning"));
1759 
1760 		return (0);
1761 	}
1762 
1763 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1764 
1765 	nvp->nvp_state |= NV_PORT_ABORTING;
1766 
1767 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1768 
1769 		nv_slotp = &(nvp->nvp_slot[i]);
1770 		spkt_slot = nv_slotp->nvslot_spkt;
1771 
1772 		/*
1773 		 * skip if not active command in slot
1774 		 */
1775 		if (spkt_slot == NULL) {
1776 			continue;
1777 		}
1778 
1779 		/*
1780 		 * if a specific packet was requested, skip if
1781 		 * this is not a match
1782 		 */
1783 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1784 			continue;
1785 		}
1786 
1787 		/*
1788 		 * stop the hardware.  This could need reworking
1789 		 * when NCQ is enabled in the driver.
1790 		 */
1791 		if (reset_once == B_FALSE) {
1792 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1793 
1794 			/*
1795 			 * stop DMA engine
1796 			 */
1797 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1798 
1799 			nv_reset(nvp);
1800 			reset_once = B_TRUE;
1801 		}
1802 
1803 		spkt_slot->satapkt_reason = abort_reason;
1804 		nv_complete_io(nvp, spkt_slot, i);
1805 		aborted++;
1806 	}
1807 
1808 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1809 
1810 	return (aborted);
1811 }
1812 
1813 
1814 /*
1815  * Called by sata module to reset a port, device, or the controller.
1816  */
1817 static int
1818 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1819 {
1820 	int cport = sd->satadev_addr.cport;
1821 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1822 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1823 	int ret = SATA_SUCCESS;
1824 
1825 	ASSERT(cport < NV_MAX_PORTS(nvc));
1826 
1827 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1828 
1829 	mutex_enter(&nvp->nvp_mutex);
1830 
1831 	switch (sd->satadev_addr.qual) {
1832 
1833 	case SATA_ADDR_CPORT:
1834 		/*FALLTHROUGH*/
1835 	case SATA_ADDR_DCPORT:
1836 		nv_reset(nvp);
1837 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1838 
1839 		break;
1840 	case SATA_ADDR_CNTRL:
1841 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1842 		    "nv_sata_reset: constroller reset not supported"));
1843 
1844 		break;
1845 	case SATA_ADDR_PMPORT:
1846 	case SATA_ADDR_DPMPORT:
1847 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1848 		    "nv_sata_reset: port multipliers not supported"));
1849 		/*FALLTHROUGH*/
1850 	default:
1851 		/*
1852 		 * unsupported case
1853 		 */
1854 		ret = SATA_FAILURE;
1855 		break;
1856 	}
1857 
1858 	if (ret == SATA_SUCCESS) {
1859 		/*
1860 		 * If the port is inactive, do a quiet reset and don't attempt
1861 		 * to wait for reset completion or do any post reset processing
1862 		 */
1863 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1864 			nvp->nvp_state &= ~NV_PORT_RESET;
1865 			nvp->nvp_reset_time = 0;
1866 		}
1867 
1868 		/*
1869 		 * clear the port failed flag
1870 		 */
1871 		nvp->nvp_state &= ~NV_PORT_FAILED;
1872 	}
1873 
1874 	mutex_exit(&nvp->nvp_mutex);
1875 
1876 	return (ret);
1877 }
1878 
1879 
1880 /*
1881  * Sata entry point to handle port activation.  cfgadm -c connect
1882  */
1883 static int
1884 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1885 {
1886 	int cport = sd->satadev_addr.cport;
1887 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1888 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1889 
1890 	ASSERT(cport < NV_MAX_PORTS(nvc));
1891 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1892 
1893 	mutex_enter(&nvp->nvp_mutex);
1894 
1895 	sd->satadev_state = SATA_STATE_READY;
1896 
1897 	nv_copy_registers(nvp, sd, NULL);
1898 
1899 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1900 
1901 	nvp->nvp_state = 0;
1902 
1903 	mutex_exit(&nvp->nvp_mutex);
1904 
1905 	return (SATA_SUCCESS);
1906 }
1907 
1908 
1909 /*
1910  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1911  */
1912 static int
1913 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1914 {
1915 	int cport = sd->satadev_addr.cport;
1916 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1917 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1918 
1919 	ASSERT(cport < NV_MAX_PORTS(nvc));
1920 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1921 
1922 	mutex_enter(&nvp->nvp_mutex);
1923 
1924 	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1925 
1926 	/*
1927 	 * mark the device as inaccessible
1928 	 */
1929 	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1930 
1931 	/*
1932 	 * disable the interrupts on port
1933 	 */
1934 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1935 
1936 	nv_uninit_port(nvp);
1937 
1938 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1939 	nv_copy_registers(nvp, sd, NULL);
1940 
1941 	mutex_exit(&nvp->nvp_mutex);
1942 
1943 	return (SATA_SUCCESS);
1944 }
1945 
1946 
1947 /*
1948  * find an empty slot in the driver's queue, increment counters,
1949  * and then invoke the appropriate PIO or DMA start routine.
1950  */
static int
nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
{
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	nv_slot_t *nv_slotp;
	boolean_t dma_cmd;

	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
	    sata_cmdp->satacmd_cmd_reg));

	/*
	 * NCQ commands claim a free slot and set its bit in SACTIVE;
	 * all other commands use slot 0.
	 */
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		nvp->nvp_ncq_run++;
		/*
		 * search for an empty NCQ slot.  by the time, it's already
		 * been determined by the caller that there is room on the
		 * queue.
		 */
		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
		    on_bit <<= 1) {
			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
				break;
			}
		}

		/*
		 * the first empty slot found, should not exceed the queue
		 * depth of the drive.  if it does it's an error.
		 */
		ASSERT(slot != nvp->nvp_queue_depth);

		sactive = nv_get32(nvc->nvc_bar_hdl[5],
		    nvp->nvp_sactive);
		ASSERT((sactive & on_bit) == 0);
		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
		    on_bit));
		nvp->nvp_sactive_cache |= on_bit;

		ncq = NVSLOT_NCQ;

	} else {
		nvp->nvp_non_ncq_run++;
		slot = 0;
	}

	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];

	/* the slot must be free at this point */
	ASSERT(nv_slotp->nvslot_spkt == NULL);

	nv_slotp->nvslot_spkt = spkt;
	nv_slotp->nvslot_flags = ncq;

	/*
	 * the sata module doesn't indicate which commands utilize the
	 * DMA engine, so find out using this switch table.
	 */
	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
	case SATAC_READ_DMA_EXT:
	case SATAC_WRITE_DMA_EXT:
	case SATAC_WRITE_DMA:
	case SATAC_READ_DMA:
	case SATAC_READ_DMA_QUEUED:
	case SATAC_READ_DMA_QUEUED_EXT:
	case SATAC_WRITE_DMA_QUEUED:
	case SATAC_WRITE_DMA_QUEUED_EXT:
	case SATAC_READ_FPDMA_QUEUED:
	case SATAC_WRITE_FPDMA_QUEUED:
		dma_cmd = B_TRUE;
		break;
	default:
		dma_cmd = B_FALSE;
	}

	/*
	 * select start and interrupt handlers for the command class:
	 * DMA, ATAPI packet, non-data, PIO in, or PIO out
	 */
	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
		nv_slotp->nvslot_start = nv_start_dma;
		nv_slotp->nvslot_intr = nv_intr_dma;
	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
		nv_slotp->nvslot_start = nv_start_pkt_pio;
		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
		if ((direction == SATA_DIR_READ) ||
		    (direction == SATA_DIR_WRITE)) {
			nv_slotp->nvslot_byte_count =
			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
			nv_slotp->nvslot_v_addr =
			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
			/*
			 * Freeing DMA resources allocated by the framework
			 * now to avoid buffer overwrite (dma sync) problems
			 * when the buffer is released at command completion.
			 * Primarily an issue on systems with more than
			 * 4GB of memory.
			 */
			sata_free_dma_resources(spkt);
		}
	} else if (direction == SATA_DIR_NODATA_XFER) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
		nv_slotp->nvslot_start = nv_start_nodata;
		nv_slotp->nvslot_intr = nv_intr_nodata;
	} else if (direction == SATA_DIR_READ) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
		nv_slotp->nvslot_start = nv_start_pio_in;
		nv_slotp->nvslot_intr = nv_intr_pio_in;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
		/*
		 * Freeing DMA resources allocated by the framework now to
		 * avoid buffer overwrite (dma sync) problems when the buffer
		 * is released at command completion.  This is not an issue
		 * for write because write does not update the buffer.
		 * Primarily an issue on systems with more than 4GB of memory.
		 */
		sata_free_dma_resources(spkt);
	} else if (direction == SATA_DIR_WRITE) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
		nv_slotp->nvslot_start = nv_start_pio_out;
		nv_slotp->nvslot_intr = nv_intr_pio_out;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
	} else {
		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
		    " %d cookies %d cmd %x",
		    sata_cmdp->satacmd_flags.sata_data_direction,
		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		ret = SATA_TRAN_CMD_UNSUPPORTED;

		goto fail;
	}

	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
	    SATA_TRAN_ACCEPTED) {
#ifdef SGPIO_SUPPORT
		nv_sgp_drive_active(nvp->nvp_ctlp,
		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
#endif
		nv_slotp->nvslot_stime = ddi_get_lbolt();

		/*
		 * start timer if it's not already running and this packet
		 * is not requesting polled mode.
		 */
		if ((nvp->nvp_timeout_id == 0) &&
		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
			    drv_usectohz(NV_ONE_SEC));
		}

		return (SATA_TRAN_ACCEPTED);
	}

	/*
	 * the start routine failed: back out the run counters and slot
	 * state set up above so the slot can be reused
	 */
	fail:

	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;

	if (ncq == NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
		nvp->nvp_sactive_cache &= ~on_bit;
	} else {
		nvp->nvp_non_ncq_run--;
	}
	nv_slotp->nvslot_spkt = NULL;
	nv_slotp->nvslot_flags = 0;

	return (ret);
}
2127 
2128 
2129 /*
2130  * Check if the signature is ready and if non-zero translate
2131  * it into a solaris sata defined type.
2132  */
2133 static void
2134 nv_read_signature(nv_port_t *nvp)
2135 {
2136 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2137 
2138 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2139 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2140 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2141 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2142 
2143 	switch (nvp->nvp_signature) {
2144 
2145 	case NV_SIG_DISK:
2146 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2147 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2148 		break;
2149 	case NV_SIG_ATAPI:
2150 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2151 		    "drive is an optical device"));
2152 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2153 		break;
2154 	case NV_SIG_PM:
2155 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2156 		    "device is a port multiplier"));
2157 		nvp->nvp_type = SATA_DTYPE_PMULT;
2158 		break;
2159 	case NV_SIG_NOTREADY:
2160 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2161 		    "signature not ready"));
2162 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2163 		break;
2164 	default:
2165 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2166 		    " recognized", nvp->nvp_signature);
2167 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2168 		break;
2169 	}
2170 
2171 	if (nvp->nvp_signature) {
2172 		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2173 	}
2174 }
2175 
2176 
2177 /*
2178  * Reset the port
2179  */
2180 static void
2181 nv_reset(nv_port_t *nvp)
2182 {
2183 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2184 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2185 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2186 	uint32_t sctrl;
2187 
2188 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2189 
2190 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2191 
2192 	/*
2193 	 * clear signature registers
2194 	 */
2195 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2196 	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2197 	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2198 	nv_put8(cmdhdl, nvp->nvp_count, 0);
2199 
2200 	nvp->nvp_signature = 0;
2201 	nvp->nvp_type = 0;
2202 	nvp->nvp_state |= NV_PORT_RESET;
2203 	nvp->nvp_reset_time = ddi_get_lbolt();
2204 	nvp->nvp_link_lost_time = 0;
2205 
2206 	/*
2207 	 * assert reset in PHY by writing a 1 to bit 0 scontrol
2208 	 */
2209 	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2210 
2211 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2212 
2213 	/*
2214 	 * wait 1ms
2215 	 */
2216 	drv_usecwait(1000);
2217 
2218 	/*
2219 	 * de-assert reset in PHY
2220 	 */
2221 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2222 
2223 	/*
2224 	 * make sure timer is running
2225 	 */
2226 	if (nvp->nvp_timeout_id == 0) {
2227 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2228 		    drv_usectohz(NV_ONE_SEC));
2229 	}
2230 }
2231 
2232 
2233 /*
2234  * Initialize register handling specific to mcp51/mcp55
2235  */
2236 /* ARGSUSED */
2237 static void
2238 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2239 {
2240 	nv_port_t *nvp;
2241 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2242 	uint8_t off, port;
2243 
2244 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2245 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2246 
2247 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2248 		nvp = &(nvc->nvc_port[port]);
2249 		nvp->nvp_mcp5x_int_status =
2250 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2251 		nvp->nvp_mcp5x_int_ctl =
2252 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2253 
2254 		/*
2255 		 * clear any previous interrupts asserted
2256 		 */
2257 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2258 		    MCP5X_INT_CLEAR);
2259 
2260 		/*
2261 		 * These are the interrupts to accept for now.  The spec
2262 		 * says these are enable bits, but nvidia has indicated
2263 		 * these are masking bits.  Even though they may be masked
2264 		 * out to prevent asserting the main interrupt, they can
2265 		 * still be asserted while reading the interrupt status
2266 		 * register, so that needs to be considered in the interrupt
2267 		 * handler.
2268 		 */
2269 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2270 		    ~(MCP5X_INT_IGNORE));
2271 	}
2272 
2273 	/*
2274 	 * Allow the driver to program the BM on the first command instead
2275 	 * of waiting for an interrupt.
2276 	 */
2277 #ifdef NCQ
2278 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2279 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2280 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2281 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2282 #endif
2283 
2284 	/*
2285 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2286 	 * Enable DMA to take advantage of that.
2287 	 *
2288 	 */
2289 	if (nvc->nvc_revid >= 0xa3) {
2290 		if (nv_sata_40bit_dma == B_TRUE) {
2291 			uint32_t reg32;
2292 			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2293 			    "rev id is %X and"
2294 			    " is capable of 40-bit DMA addressing",
2295 			    nvc->nvc_revid));
2296 			nvc->dma_40bit = B_TRUE;
2297 			reg32 = pci_config_get32(pci_conf_handle,
2298 			    NV_SATA_CFG_20);
2299 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2300 			    reg32 | NV_40BIT_PRD);
2301 		} else {
2302 			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2303 			    "40-bit DMA disabled by nv_sata_40bit_dma"));
2304 		}
2305 	} else {
2306 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2307 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2308 	}
2309 }
2310 
2311 
2312 /*
2313  * Initialize register handling specific to ck804
2314  */
2315 static void
2316 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2317 {
2318 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2319 	uint32_t reg32;
2320 	uint16_t reg16;
2321 	nv_port_t *nvp;
2322 	int j;
2323 
2324 	/*
2325 	 * delay hotplug interrupts until PHYRDY.
2326 	 */
2327 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2328 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2329 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2330 
2331 	/*
2332 	 * enable hot plug interrupts for channel x and y
2333 	 */
2334 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2335 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2336 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2337 	    NV_HIRQ_EN | reg16);
2338 
2339 
2340 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2341 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2342 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2343 	    NV_HIRQ_EN | reg16);
2344 
2345 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2346 
2347 	/*
2348 	 * clear any existing interrupt pending then enable
2349 	 */
2350 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2351 		nvp = &(nvc->nvc_port[j]);
2352 		mutex_enter(&nvp->nvp_mutex);
2353 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2354 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2355 		mutex_exit(&nvp->nvp_mutex);
2356 	}
2357 }
2358 
2359 
2360 /*
2361  * Initialize the controller and set up driver data structures.
2362  * determine if ck804 or mcp5x class.
2363  */
2364 static int
2365 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2366 {
2367 	struct sata_hba_tran stran;
2368 	nv_port_t *nvp;
2369 	int j, ck804;
2370 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2371 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2372 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2373 	uint32_t reg32;
2374 	uint8_t reg8, reg8_save;
2375 
2376 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2377 
2378 	ck804 = B_TRUE;
2379 #ifdef SGPIO_SUPPORT
2380 	nvc->nvc_mcp5x_flag = B_FALSE;
2381 #endif
2382 
2383 	/*
2384 	 * Need to set bit 2 to 1 at config offset 0x50
2385 	 * to enable access to the bar5 registers.
2386 	 */
2387 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2388 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2389 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2390 		    reg32 | NV_BAR5_SPACE_EN);
2391 	}
2392 
2393 	/*
2394 	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2395 	 * task file registers into bar5 while mcp5x won't.  The offset of
2396 	 * the task file registers in mcp5x's space is unused, so it will
2397 	 * return zero.  So check one of the task file registers to see if it is
2398 	 * writable and reads back what was written.  If it's mcp5x it will
2399 	 * return back 0xff whereas ck804 will return the value written.
2400 	 */
2401 	reg8_save = nv_get8(bar5_hdl,
2402 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2403 
2404 
2405 	for (j = 1; j < 3; j++) {
2406 
2407 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2408 		reg8 = nv_get8(bar5_hdl,
2409 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2410 
2411 		if (reg8 != j) {
2412 			ck804 = B_FALSE;
2413 			nvc->nvc_mcp5x_flag = B_TRUE;
2414 			break;
2415 		}
2416 	}
2417 
2418 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2419 
2420 	if (ck804 == B_TRUE) {
2421 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2422 		nvc->nvc_interrupt = ck804_intr;
2423 		nvc->nvc_reg_init = ck804_reg_init;
2424 		nvc->nvc_set_intr = ck804_set_intr;
2425 	} else {
2426 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2427 		nvc->nvc_interrupt = mcp5x_intr;
2428 		nvc->nvc_reg_init = mcp5x_reg_init;
2429 		nvc->nvc_set_intr = mcp5x_set_intr;
2430 	}
2431 
2432 
2433 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2434 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2435 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2436 	stran.sata_tran_hba_features_support =
2437 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2438 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2439 	stran.sata_tran_probe_port = nv_sata_probe;
2440 	stran.sata_tran_start = nv_sata_start;
2441 	stran.sata_tran_abort = nv_sata_abort;
2442 	stran.sata_tran_reset_dport = nv_sata_reset;
2443 	stran.sata_tran_selftest = NULL;
2444 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2445 	stran.sata_tran_pwrmgt_ops = NULL;
2446 	stran.sata_tran_ioctl = NULL;
2447 	nvc->nvc_sata_hba_tran = stran;
2448 
2449 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2450 	    KM_SLEEP);
2451 
2452 	/*
2453 	 * initialize registers common to all chipsets
2454 	 */
2455 	nv_common_reg_init(nvc);
2456 
2457 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2458 		nvp = &(nvc->nvc_port[j]);
2459 
2460 		cmd_addr = nvp->nvp_cmd_addr;
2461 		ctl_addr = nvp->nvp_ctl_addr;
2462 		bm_addr = nvp->nvp_bm_addr;
2463 
2464 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2465 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2466 
2467 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2468 
2469 		nvp->nvp_data	= cmd_addr + NV_DATA;
2470 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2471 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2472 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2473 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2474 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2475 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2476 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2477 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2478 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2479 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2480 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2481 
2482 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2483 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2484 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2485 
2486 		nvp->nvp_state = 0;
2487 	}
2488 
2489 	/*
2490 	 * initialize register by calling chip specific reg initialization
2491 	 */
2492 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2493 
2494 	/* initialize the hba dma attribute */
2495 	if (nvc->dma_40bit == B_TRUE)
2496 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2497 		    &buffer_dma_40bit_attr;
2498 	else
2499 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2500 		    &buffer_dma_attr;
2501 
2502 	return (NV_SUCCESS);
2503 }
2504 
2505 
2506 /*
2507  * Initialize data structures with enough slots to handle queuing, if
2508  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2509  * NCQ support is built into the driver and enabled.  It might have been
2510  * better to derive the true size from the drive itself, but the sata
2511  * module only sends down that information on the first NCQ command,
2512  * which means possibly re-sizing the structures on an interrupt stack,
2513  * making error handling more messy.  The easy way is to just allocate
2514  * all 32 slots, which is what most drives support anyway.
2515  */
2516 static int
2517 nv_init_port(nv_port_t *nvp)
2518 {
2519 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2520 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2521 	dev_info_t *dip = nvc->nvc_dip;
2522 	ddi_device_acc_attr_t dev_attr;
2523 	size_t buf_size;
2524 	ddi_dma_cookie_t cookie;
2525 	uint_t count;
2526 	int rc, i;
2527 
2528 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2529 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2530 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2531 
2532 	if (nvp->nvp_state & NV_PORT_INIT) {
2533 		NVLOG((NVDBG_INIT, nvc, nvp,
2534 		    "nv_init_port previously initialized"));
2535 
2536 		return (NV_SUCCESS);
2537 	} else {
2538 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2539 	}
2540 
2541 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2542 	    NV_QUEUE_SLOTS, KM_SLEEP);
2543 
2544 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2545 	    NV_QUEUE_SLOTS, KM_SLEEP);
2546 
2547 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2548 	    NV_QUEUE_SLOTS, KM_SLEEP);
2549 
2550 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2551 	    NV_QUEUE_SLOTS, KM_SLEEP);
2552 
2553 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2554 	    KM_SLEEP);
2555 
2556 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2557 
2558 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2559 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2560 
2561 		if (rc != DDI_SUCCESS) {
2562 			nv_uninit_port(nvp);
2563 
2564 			return (NV_FAILURE);
2565 		}
2566 
2567 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2568 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2569 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2570 		    &(nvp->nvp_sg_acc_hdl[i]));
2571 
2572 		if (rc != DDI_SUCCESS) {
2573 			nv_uninit_port(nvp);
2574 
2575 			return (NV_FAILURE);
2576 		}
2577 
2578 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2579 		    nvp->nvp_sg_addr[i], buf_size,
2580 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2581 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2582 
2583 		if (rc != DDI_DMA_MAPPED) {
2584 			nv_uninit_port(nvp);
2585 
2586 			return (NV_FAILURE);
2587 		}
2588 
2589 		ASSERT(count == 1);
2590 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2591 
2592 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2593 
2594 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2595 	}
2596 
2597 	/*
2598 	 * nvp_queue_depth represents the actual drive queue depth, not the
2599 	 * number of slots allocated in the structures (which may be more).
2600 	 * Actual queue depth is only learned after the first NCQ command, so
2601 	 * initialize it to 1 for now.
2602 	 */
2603 	nvp->nvp_queue_depth = 1;
2604 
2605 	nvp->nvp_state |= NV_PORT_INIT;
2606 
2607 	return (NV_SUCCESS);
2608 }
2609 
2610 
2611 /*
2612  * Free dynamically allocated structures for port.
2613  */
2614 static void
2615 nv_uninit_port(nv_port_t *nvp)
2616 {
2617 	int i;
2618 
2619 	/*
2620 	 * It is possible to reach here before a port has been initialized or
2621 	 * after it has already been uninitialized.  Just return in that case.
2622 	 */
2623 	if (nvp->nvp_slot == NULL) {
2624 
2625 		return;
2626 	}
2627 
2628 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2629 	    "nv_uninit_port uninitializing"));
2630 
2631 	nvp->nvp_type = SATA_DTYPE_NONE;
2632 
2633 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2634 		if (nvp->nvp_sg_paddr[i]) {
2635 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2636 		}
2637 
2638 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2639 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2640 		}
2641 
2642 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2643 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2644 		}
2645 	}
2646 
2647 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2648 	nvp->nvp_slot = NULL;
2649 
2650 	kmem_free(nvp->nvp_sg_dma_hdl,
2651 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2652 	nvp->nvp_sg_dma_hdl = NULL;
2653 
2654 	kmem_free(nvp->nvp_sg_acc_hdl,
2655 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2656 	nvp->nvp_sg_acc_hdl = NULL;
2657 
2658 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2659 	nvp->nvp_sg_addr = NULL;
2660 
2661 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2662 	nvp->nvp_sg_paddr = NULL;
2663 
2664 	nvp->nvp_state &= ~NV_PORT_INIT;
2665 	nvp->nvp_signature = 0;
2666 }
2667 
2668 
2669 /*
2670  * Cache register offsets and access handles to frequently accessed registers
2671  * which are common to either chipset.
2672  */
2673 static void
2674 nv_common_reg_init(nv_ctl_t *nvc)
2675 {
2676 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2677 	uchar_t *bm_addr_offset, *sreg_offset;
2678 	uint8_t bar, port;
2679 	nv_port_t *nvp;
2680 
2681 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2682 		if (port == 0) {
2683 			bar = NV_BAR_0;
2684 			bm_addr_offset = 0;
2685 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2686 		} else {
2687 			bar = NV_BAR_2;
2688 			bm_addr_offset = (uchar_t *)8;
2689 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2690 		}
2691 
2692 		nvp = &(nvc->nvc_port[port]);
2693 		nvp->nvp_ctlp = nvc;
2694 		nvp->nvp_port_num = port;
2695 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2696 
2697 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2698 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2699 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2700 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2701 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2702 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2703 		    (long)bm_addr_offset;
2704 
2705 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2706 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2707 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2708 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2709 	}
2710 }
2711 
2712 
2713 static void
2714 nv_uninit_ctl(nv_ctl_t *nvc)
2715 {
2716 	int port;
2717 	nv_port_t *nvp;
2718 
2719 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2720 
2721 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2722 		nvp = &(nvc->nvc_port[port]);
2723 		mutex_enter(&nvp->nvp_mutex);
2724 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2725 		nv_uninit_port(nvp);
2726 		mutex_exit(&nvp->nvp_mutex);
2727 		mutex_destroy(&nvp->nvp_mutex);
2728 		cv_destroy(&nvp->nvp_poll_cv);
2729 	}
2730 
2731 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2732 	nvc->nvc_port = NULL;
2733 }
2734 
2735 
2736 /*
2737  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2738  * that interrupts from other devices can be disregarded while dtracing.
2739  */
2740 /* ARGSUSED */
2741 static uint_t
2742 ck804_intr(caddr_t arg1, caddr_t arg2)
2743 {
2744 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2745 	uint8_t intr_status;
2746 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2747 
2748 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
2749 		return (DDI_INTR_UNCLAIMED);
2750 
2751 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2752 
2753 	if (intr_status == 0) {
2754 
2755 		return (DDI_INTR_UNCLAIMED);
2756 	}
2757 
2758 	ck804_intr_process(nvc, intr_status);
2759 
2760 	return (DDI_INTR_CLAIMED);
2761 }
2762 
2763 
2764 /*
2765  * Main interrupt handler for ck804.  handles normal device
2766  * interrupts as well as port hot plug and remove interrupts.
2767  *
2768  */
2769 static void
2770 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2771 {
2772 
2773 	int port, i;
2774 	nv_port_t *nvp;
2775 	nv_slot_t *nv_slotp;
2776 	uchar_t	status;
2777 	sata_pkt_t *spkt;
2778 	uint8_t bmstatus, clear_bits;
2779 	ddi_acc_handle_t bmhdl;
2780 	int nvcleared = 0;
2781 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2782 	uint32_t sstatus;
2783 	int port_mask_hot[] = {
2784 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
2785 	};
2786 	int port_mask_pm[] = {
2787 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
2788 	};
2789 
2790 	NVLOG((NVDBG_INTR, nvc, NULL,
2791 	    "ck804_intr_process entered intr_status=%x", intr_status));
2792 
2793 	/*
2794 	 * For command completion interrupt, explicit clear is not required.
2795 	 * however, for the error cases explicit clear is performed.
2796 	 */
2797 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2798 
2799 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
2800 
2801 		if ((port_mask[port] & intr_status) == 0) {
2802 			continue;
2803 		}
2804 
2805 		NVLOG((NVDBG_INTR, nvc, NULL,
2806 		    "ck804_intr_process interrupt on port %d", port));
2807 
2808 		nvp = &(nvc->nvc_port[port]);
2809 
2810 		mutex_enter(&nvp->nvp_mutex);
2811 
2812 		/*
2813 		 * there was a corner case found where an interrupt
2814 		 * arrived before nvp_slot was set.  Should
2815 		 * probably should track down why that happens and try
2816 		 * to eliminate that source and then get rid of this
2817 		 * check.
2818 		 */
2819 		if (nvp->nvp_slot == NULL) {
2820 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2821 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2822 			    "received before initialization "
2823 			    "completed status=%x", status));
2824 			mutex_exit(&nvp->nvp_mutex);
2825 
2826 			/*
2827 			 * clear interrupt bits
2828 			 */
2829 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2830 			    port_mask[port]);
2831 
2832 			continue;
2833 		}
2834 
2835 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2836 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2837 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2838 			    " no command in progress status=%x", status));
2839 			mutex_exit(&nvp->nvp_mutex);
2840 
2841 			/*
2842 			 * clear interrupt bits
2843 			 */
2844 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2845 			    port_mask[port]);
2846 
2847 			continue;
2848 		}
2849 
2850 		bmhdl = nvp->nvp_bm_hdl;
2851 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2852 
2853 		if (!(bmstatus & BMISX_IDEINTS)) {
2854 			mutex_exit(&nvp->nvp_mutex);
2855 
2856 			continue;
2857 		}
2858 
2859 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2860 
2861 		if (status & SATA_STATUS_BSY) {
2862 			mutex_exit(&nvp->nvp_mutex);
2863 
2864 			continue;
2865 		}
2866 
2867 		nv_slotp = &(nvp->nvp_slot[0]);
2868 
2869 		ASSERT(nv_slotp);
2870 
2871 		spkt = nv_slotp->nvslot_spkt;
2872 
2873 		if (spkt == NULL) {
2874 			mutex_exit(&nvp->nvp_mutex);
2875 
2876 			continue;
2877 		}
2878 
2879 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2880 
2881 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2882 
2883 		/*
2884 		 * If there is no link cannot be certain about the completion
2885 		 * of the packet, so abort it.
2886 		 */
2887 		if (nv_check_link((&spkt->satapkt_device)->
2888 		    satadev_scr.sstatus) == B_FALSE) {
2889 
2890 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2891 
2892 		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2893 
2894 			nv_complete_io(nvp, spkt, 0);
2895 		}
2896 
2897 		mutex_exit(&nvp->nvp_mutex);
2898 	}
2899 
2900 	/*
2901 	 * ck804 often doesn't correctly distinguish hot add/remove
2902 	 * interrupts.  Frequently both the ADD and the REMOVE bits
2903 	 * are asserted, whether it was a remove or add.  Use sstatus
2904 	 * to distinguish hot add from hot remove.
2905 	 */
2906 
2907 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2908 		clear_bits = 0;
2909 
2910 		nvp = &(nvc->nvc_port[port]);
2911 		mutex_enter(&nvp->nvp_mutex);
2912 
2913 		if ((port_mask_pm[port] & intr_status) != 0) {
2914 			clear_bits = port_mask_pm[port];
2915 			NVLOG((NVDBG_HOT, nvc, nvp,
2916 			    "clearing PM interrupt bit: %x",
2917 			    intr_status & port_mask_pm[port]));
2918 		}
2919 
2920 		if ((port_mask_hot[port] & intr_status) == 0) {
2921 			if (clear_bits != 0) {
2922 				goto clear;
2923 			} else {
2924 				mutex_exit(&nvp->nvp_mutex);
2925 				continue;
2926 			}
2927 		}
2928 
2929 		/*
2930 		 * reaching here means there was a hot add or remove.
2931 		 */
2932 		clear_bits |= port_mask_hot[port];
2933 
2934 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2935 
2936 		sstatus = nv_get32(bar5_hdl,
2937 		    nvc->nvc_port[port].nvp_sstatus);
2938 
2939 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2940 		    SSTATUS_DET_DEVPRE_PHYCOM) {
2941 			nv_report_add_remove(nvp, 0);
2942 		} else {
2943 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2944 		}
2945 	clear:
2946 		/*
2947 		 * clear interrupt bits.  explicit interrupt clear is
2948 		 * required for hotplug interrupts.
2949 		 */
2950 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
2951 
2952 		/*
2953 		 * make sure it's flushed and cleared.  If not try
2954 		 * again.  Sometimes it has been observed to not clear
2955 		 * on the first try.
2956 		 */
2957 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2958 
2959 		/*
2960 		 * make 10 additional attempts to clear the interrupt
2961 		 */
2962 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2963 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2964 			    "still not clear try=%d", intr_status,
2965 			    ++nvcleared));
2966 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2967 			    clear_bits);
2968 			intr_status = nv_get8(bar5_hdl,
2969 			    nvc->nvc_ck804_int_status);
2970 		}
2971 
2972 		/*
2973 		 * if still not clear, log a message and disable the
2974 		 * port. highly unlikely that this path is taken, but it
2975 		 * gives protection against a wedged interrupt.
2976 		 */
2977 		if (intr_status & clear_bits) {
2978 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2979 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2980 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2981 			nvp->nvp_state |= NV_PORT_FAILED;
2982 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2983 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2984 			    "interrupt.  disabling port intr_status=%X",
2985 			    intr_status);
2986 		}
2987 
2988 		mutex_exit(&nvp->nvp_mutex);
2989 	}
2990 }
2991 
2992 
2993 /*
2994  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
2995  * on the controller, to handle completion and hot plug and remove events.
2996  *
2997  */
2998 static uint_t
2999 mcp5x_intr_port(nv_port_t *nvp)
3000 {
3001 	nv_ctl_t *nvc = nvp->nvp_ctlp;
3002 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3003 	uint8_t clear = 0, intr_cycles = 0;
3004 	int ret = DDI_INTR_UNCLAIMED;
3005 	uint16_t int_status;
3006 
3007 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port entered"));
3008 
3009 	for (;;) {
3010 		/*
3011 		 * read current interrupt status
3012 		 */
3013 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3014 
3015 		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
3016 
3017 		/*
3018 		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3019 		 * but are masked out from causing an interrupt to be generated
3020 		 * to the processor.  Ignore them here by masking them out.
3021 		 */
3022 		int_status &= ~(MCP5X_INT_IGNORE);
3023 
3024 		/*
3025 		 * exit the loop when no more interrupts to process
3026 		 */
3027 		if (int_status == 0) {
3028 
3029 			break;
3030 		}
3031 
3032 		if (int_status & MCP5X_INT_COMPLETE) {
3033 			NVLOG((NVDBG_INTR, nvc, nvp,
3034 			    "mcp5x_packet_complete_intr"));
3035 			/*
3036 			 * since int_status was set, return DDI_INTR_CLAIMED
3037 			 * from the DDI's perspective even though the packet
3038 			 * completion may not have succeeded.  If it fails,
3039 			 * need to manually clear the interrupt, otherwise
3040 			 * clearing is implicit.
3041 			 */
3042 			ret = DDI_INTR_CLAIMED;
3043 			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3044 			    NV_FAILURE) {
3045 				clear = MCP5X_INT_COMPLETE;
3046 			} else {
3047 				intr_cycles = 0;
3048 			}
3049 		}
3050 
3051 		if (int_status & MCP5X_INT_DMA_SETUP) {
3052 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));
3053 
3054 			/*
3055 			 * Needs to be cleared before starting the BM, so do it
3056 			 * now.  make sure this is still working.
3057 			 */
3058 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3059 			    MCP5X_INT_DMA_SETUP);
3060 #ifdef NCQ
3061 			ret = mcp5x_dma_setup_intr(nvc, nvp);
3062 #endif
3063 		}
3064 
3065 		if (int_status & MCP5X_INT_REM) {
3066 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x device removed"));
3067 			clear = MCP5X_INT_REM;
3068 			ret = DDI_INTR_CLAIMED;
3069 
3070 			mutex_enter(&nvp->nvp_mutex);
3071 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3072 			mutex_exit(&nvp->nvp_mutex);
3073 
3074 		} else if (int_status & MCP5X_INT_ADD) {
3075 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
3076 			clear = MCP5X_INT_ADD;
3077 			ret = DDI_INTR_CLAIMED;
3078 
3079 			mutex_enter(&nvp->nvp_mutex);
3080 			nv_report_add_remove(nvp, 0);
3081 			mutex_exit(&nvp->nvp_mutex);
3082 		}
3083 
3084 		if (clear) {
3085 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3086 			clear = 0;
3087 		}
3088 
3089 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3090 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3091 			    "processing.  Disabling port int_status=%X"
3092 			    " clear=%X", int_status, clear);
3093 			mutex_enter(&nvp->nvp_mutex);
3094 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3095 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3096 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3097 			nvp->nvp_state |= NV_PORT_FAILED;
3098 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3099 			mutex_exit(&nvp->nvp_mutex);
3100 		}
3101 	}
3102 
3103 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port: finished ret=%d", ret));
3104 
3105 	return (ret);
3106 }
3107 
3108 
3109 /* ARGSUSED */
3110 static uint_t
3111 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3112 {
3113 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3114 	int ret;
3115 
3116 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3117 		return (DDI_INTR_UNCLAIMED);
3118 
3119 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3120 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3121 
3122 	return (ret);
3123 }
3124 
3125 
#ifdef NCQ
/*
 * with software driven NCQ on mcp5x, an interrupt occurs right
 * before the drive is ready to do a DMA transfer.  At this point,
 * the PRD table needs to be programmed and the DMA engine enabled
 * and ready to go.
 *
 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
 * -- clear bit 0 of master command reg
 * -- program PRD
 * -- clear the interrupt status bit for the DMA Setup FIS
 * -- set bit 0 of the bus master command register
 */
static int
mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	int slot;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uint8_t bmicx;
	int port = nvp->nvp_port_num;
	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};

	/*
	 * NCQ support is incomplete (see file header); fail loudly if
	 * this path is ever reached before that work is done.
	 */
	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this should not be executed at all until NCQ");

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * extract the tag of the command the drive is ready to transfer
	 * from the NCQ status register
	 */
	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);

	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;

	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));

	/*
	 * halt the DMA engine.  This step is necessary according to
	 * the mcp5x spec, probably since there may have been a "first" packet
	 * that already programmed the DMA engine, but may not turn out to
	 * be the first one processed.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);

	nv_start_dma_engine(nvp, slot);

	mutex_exit(&nvp->nvp_mutex);

	return (DDI_INTR_CLAIMED);
}
#endif /* NCQ */
3186 
3187 
3188 /*
3189  * packet completion interrupt.  If the packet is complete, invoke
3190  * the packet completion callback.
3191  */
3192 static int
3193 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3194 {
3195 	uint8_t status, bmstatus;
3196 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3197 	int sactive;
3198 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3199 	sata_pkt_t *spkt;
3200 	nv_slot_t *nv_slotp;
3201 
3202 	mutex_enter(&nvp->nvp_mutex);
3203 
3204 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3205 
3206 	if (!(bmstatus & BMISX_IDEINTS)) {
3207 		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3208 		mutex_exit(&nvp->nvp_mutex);
3209 
3210 		return (NV_FAILURE);
3211 	}
3212 
3213 	/*
3214 	 * If the just completed item is a non-ncq command, the busy
3215 	 * bit should not be set
3216 	 */
3217 	if (nvp->nvp_non_ncq_run) {
3218 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3219 		if (status & SATA_STATUS_BSY) {
3220 			nv_cmn_err(CE_WARN, nvc, nvp,
3221 			    "unexpected SATA_STATUS_BSY set");
3222 			mutex_exit(&nvp->nvp_mutex);
3223 			/*
3224 			 * calling function will clear interrupt.  then
3225 			 * the real interrupt will either arrive or the
3226 			 * packet timeout handling will take over and
3227 			 * reset.
3228 			 */
3229 			return (NV_FAILURE);
3230 		}
3231 
3232 	} else {
3233 		/*
3234 		 * NCQ check for BSY here and wait if still bsy before
3235 		 * continuing. Rather than wait for it to be cleared
3236 		 * when starting a packet and wasting CPU time, the starting
3237 		 * thread can exit immediate, but might have to spin here
3238 		 * for a bit possibly.  Needs more work and experimentation.
3239 		 */
3240 		ASSERT(nvp->nvp_ncq_run);
3241 	}
3242 
3243 
3244 	if (nvp->nvp_ncq_run) {
3245 		ncq_command = B_TRUE;
3246 		ASSERT(nvp->nvp_non_ncq_run == 0);
3247 	} else {
3248 		ASSERT(nvp->nvp_non_ncq_run != 0);
3249 	}
3250 
3251 	/*
3252 	 * active_pkt_bit will represent the bitmap of the single completed
3253 	 * packet.  Because of the nature of sw assisted NCQ, only one
3254 	 * command will complete per interrupt.
3255 	 */
3256 
3257 	if (ncq_command == B_FALSE) {
3258 		active_pkt = 0;
3259 	} else {
3260 		/*
3261 		 * NCQ: determine which command just completed, by examining
3262 		 * which bit cleared in the register since last written.
3263 		 */
3264 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3265 
3266 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3267 
3268 		ASSERT(active_pkt_bit);
3269 
3270 
3271 		/*
3272 		 * this failure path needs more work to handle the
3273 		 * error condition and recovery.
3274 		 */
3275 		if (active_pkt_bit == 0) {
3276 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3277 
3278 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3279 			    "nvp->nvp_sactive %X", sactive,
3280 			    nvp->nvp_sactive_cache);
3281 
3282 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3283 
3284 			mutex_exit(&nvp->nvp_mutex);
3285 
3286 			return (NV_FAILURE);
3287 		}
3288 
3289 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3290 		    active_pkt++, active_pkt_bit >>= 1) {
3291 		}
3292 
3293 		/*
3294 		 * make sure only one bit is ever turned on
3295 		 */
3296 		ASSERT(active_pkt_bit == 1);
3297 
3298 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3299 	}
3300 
3301 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3302 
3303 	spkt = nv_slotp->nvslot_spkt;
3304 
3305 	ASSERT(spkt != NULL);
3306 
3307 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3308 
3309 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3310 
3311 	/*
3312 	 * If there is no link cannot be certain about the completion
3313 	 * of the packet, so abort it.
3314 	 */
3315 	if (nv_check_link((&spkt->satapkt_device)->
3316 	    satadev_scr.sstatus) == B_FALSE) {
3317 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3318 
3319 	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3320 
3321 		nv_complete_io(nvp, spkt, active_pkt);
3322 	}
3323 
3324 	mutex_exit(&nvp->nvp_mutex);
3325 
3326 	return (NV_SUCCESS);
3327 }
3328 
3329 
/*
 * Finish processing of a completed command: decrement the appropriate
 * run count, release the slot, and notify the originator (either by
 * waking a synchronous waiter or by invoking the async completion
 * callback).
 *
 * Must be entered with nvp_mutex held.  NOTE: for async packets with a
 * completion callback the mutex is dropped around the callback and
 * reacquired before returning, so port state may change across this
 * call from the caller's perspective.
 */
static void
nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
{

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	/*
	 * account for the command that just finished against the
	 * correct outstanding-command counter
	 */
	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
	} else {
		nvp->nvp_non_ncq_run--;
	}

	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so the slot can be reused.
	 */
	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;

	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
		/*
		 * If this is not timed polled mode cmd, which has an
		 * active thread monitoring for completion, then need
		 * to signal the sleeping thread that the cmd is complete.
		 */
		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
			cv_signal(&nvp->nvp_poll_cv);
		}

		return;
	}

	if (spkt->satapkt_comp != NULL) {
		/*
		 * drop the port mutex around the callback to avoid
		 * deadlock if the callback re-enters the driver
		 */
		mutex_exit(&nvp->nvp_mutex);
		(*spkt->satapkt_comp)(spkt);
		mutex_enter(&nvp->nvp_mutex);
	}
}
3367 
3368 
3369 /*
3370  * check whether packet is ncq command or not.  for ncq command,
3371  * start it if there is still room on queue.  for non-ncq command only
3372  * start if no other command is running.
3373  */
3374 static int
3375 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3376 {
3377 	uint8_t cmd, ncq;
3378 
3379 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3380 
3381 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3382 
3383 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3384 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3385 
3386 	if (ncq == B_FALSE) {
3387 
3388 		if ((nvp->nvp_non_ncq_run == 1) ||
3389 		    (nvp->nvp_ncq_run > 0)) {
3390 			/*
3391 			 * next command is non-ncq which can't run
3392 			 * concurrently.  exit and return queue full.
3393 			 */
3394 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3395 
3396 			return (SATA_TRAN_QUEUE_FULL);
3397 		}
3398 
3399 		return (nv_start_common(nvp, spkt));
3400 	}
3401 
3402 	/*
3403 	 * ncq == B_TRUE
3404 	 */
3405 	if (nvp->nvp_non_ncq_run == 1) {
3406 		/*
3407 		 * cannot start any NCQ commands when there
3408 		 * is a non-NCQ command running.
3409 		 */
3410 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3411 
3412 		return (SATA_TRAN_QUEUE_FULL);
3413 	}
3414 
3415 #ifdef NCQ
3416 	/*
3417 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3418 	 * is being pulled out until NCQ support is later addressed
3419 	 *
3420 	 * nvp_queue_depth is initialized by the first NCQ command
3421 	 * received.
3422 	 */
3423 	if (nvp->nvp_queue_depth == 1) {
3424 		nvp->nvp_queue_depth =
3425 		    spkt->satapkt_device.satadev_qdepth;
3426 
3427 		ASSERT(nvp->nvp_queue_depth > 1);
3428 
3429 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3430 		    "nv_process_queue: nvp_queue_depth set to %d",
3431 		    nvp->nvp_queue_depth));
3432 	}
3433 #endif
3434 
3435 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3436 		/*
3437 		 * max number of NCQ commands already active
3438 		 */
3439 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3440 
3441 		return (SATA_TRAN_QUEUE_FULL);
3442 	}
3443 
3444 	return (nv_start_common(nvp, spkt));
3445 }
3446 
3447 
3448 /*
3449  * configure INTx and legacy interrupts
3450  */
3451 static int
3452 nv_add_legacy_intrs(nv_ctl_t *nvc)
3453 {
3454 	dev_info_t	*devinfo = nvc->nvc_dip;
3455 	int		actual, count = 0;
3456 	int		x, y, rc, inum = 0;
3457 
3458 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3459 
3460 	/*
3461 	 * get number of interrupts
3462 	 */
3463 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3464 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3465 		NVLOG((NVDBG_INTR, nvc, NULL,
3466 		    "ddi_intr_get_nintrs() failed, "
3467 		    "rc %d count %d", rc, count));
3468 
3469 		return (DDI_FAILURE);
3470 	}
3471 
3472 	/*
3473 	 * allocate an array of interrupt handles
3474 	 */
3475 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3476 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3477 
3478 	/*
3479 	 * call ddi_intr_alloc()
3480 	 */
3481 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3482 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3483 
3484 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3485 		nv_cmn_err(CE_WARN, nvc, NULL,
3486 		    "ddi_intr_alloc() failed, rc %d", rc);
3487 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3488 
3489 		return (DDI_FAILURE);
3490 	}
3491 
3492 	if (actual < count) {
3493 		nv_cmn_err(CE_WARN, nvc, NULL,
3494 		    "ddi_intr_alloc: requested: %d, received: %d",
3495 		    count, actual);
3496 
3497 		goto failure;
3498 	}
3499 
3500 	nvc->nvc_intr_cnt = actual;
3501 
3502 	/*
3503 	 * get intr priority
3504 	 */
3505 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3506 	    DDI_SUCCESS) {
3507 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3508 
3509 		goto failure;
3510 	}
3511 
3512 	/*
3513 	 * Test for high level mutex
3514 	 */
3515 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3516 		nv_cmn_err(CE_WARN, nvc, NULL,
3517 		    "nv_add_legacy_intrs: high level intr not supported");
3518 
3519 		goto failure;
3520 	}
3521 
3522 	for (x = 0; x < actual; x++) {
3523 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3524 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3525 			nv_cmn_err(CE_WARN, nvc, NULL,
3526 			    "ddi_intr_add_handler() failed");
3527 
3528 			goto failure;
3529 		}
3530 	}
3531 
3532 	/*
3533 	 * call ddi_intr_enable() for legacy interrupts
3534 	 */
3535 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3536 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3537 	}
3538 
3539 	return (DDI_SUCCESS);
3540 
3541 	failure:
3542 	/*
3543 	 * free allocated intr and nvc_htable
3544 	 */
3545 	for (y = 0; y < actual; y++) {
3546 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3547 	}
3548 
3549 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3550 
3551 	return (DDI_FAILURE);
3552 }
3553 
#ifdef	NV_MSI_SUPPORTED
/*
 * configure MSI interrupts
 *
 * Allocates the handle table, adds a handler for each MSI vector and
 * enables them (block enable when supported).  Returns DDI_SUCCESS or
 * DDI_FAILURE; on failure all partially-acquired interrupt resources
 * are released.
 */
static int
nv_add_msi_intrs(nv_ctl_t *nvc)
{
	dev_info_t	*devinfo = nvc->nvc_dip;
	int		count, avail, actual;
	int		x, y, rc, inum = 0;
	int		handlers_added = 0;

	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));

	/*
	 * get number of interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nintrs() failed, "
		    "rc %d count %d", rc, count);

		return (DDI_FAILURE);
	}

	/*
	 * get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
	if ((rc != DDI_SUCCESS) || (avail == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail() failed, "
		    "rc %d avail %d", rc, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
		    avail, count);
	}

	/*
	 * allocate an array of interrupt handles
	 */
	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc() failed, rc %d", rc);
		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

		return (DDI_FAILURE);
	}

	/*
	 * Use interrupt count returned or abort?
	 */
	if (actual < count) {
		NVLOG((NVDBG_INIT, nvc, NULL,
		    "Requested: %d, Received: %d", count, actual));
	}

	nvc->nvc_intr_cnt = actual;

	/*
	 * get priority for first msi, assume remaining are all the same
	 */
	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
	    DDI_SUCCESS) {
		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");

		goto failure;
	}

	/*
	 * test for high level mutex
	 */
	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "nv_add_msi_intrs: high level intr not supported");

		goto failure;
	}

	/*
	 * Call ddi_intr_add_handler()
	 */
	for (x = 0; x < actual; x++) {
		if (ddi_intr_add_handler(nvc->nvc_htable[x],
		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
			nv_cmn_err(CE_WARN, nvc, NULL,
			    "ddi_intr_add_handler() failed");

			goto failure;
		}
		handlers_added++;
	}

	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);

	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_enable(nvc->nvc_htable,
		    nvc->nvc_intr_cnt);
	} else {
		/*
		 * Call ddi_intr_enable() for MSI non block enable
		 */
		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
			(void) ddi_intr_enable(nvc->nvc_htable[x]);
		}
	}

	return (DDI_SUCCESS);

	failure:
	/*
	 * remove any handlers that were successfully added before the
	 * failure; a handle must have its handler removed before it may
	 * be freed with ddi_intr_free()
	 */
	for (y = 0; y < handlers_added; y++) {
		(void) ddi_intr_remove_handler(nvc->nvc_htable[y]);
	}

	/*
	 * free allocated intr and nvc_htable
	 */
	for (y = 0; y < actual; y++) {
		(void) ddi_intr_free(nvc->nvc_htable[y]);
	}

	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

	return (DDI_FAILURE);
}
#endif
3686 
3687 
3688 static void
3689 nv_rem_intrs(nv_ctl_t *nvc)
3690 {
3691 	int x, i;
3692 	nv_port_t *nvp;
3693 
3694 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3695 
3696 	/*
3697 	 * prevent controller from generating interrupts by
3698 	 * masking them out.  This is an extra precaution.
3699 	 */
3700 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3701 		nvp = (&nvc->nvc_port[i]);
3702 		mutex_enter(&nvp->nvp_mutex);
3703 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3704 		mutex_exit(&nvp->nvp_mutex);
3705 	}
3706 
3707 	/*
3708 	 * disable all interrupts
3709 	 */
3710 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3711 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3712 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3713 		    nvc->nvc_intr_cnt);
3714 	} else {
3715 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3716 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3717 		}
3718 	}
3719 
3720 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3721 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3722 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3723 	}
3724 
3725 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3726 }
3727 
3728 
3729 /*
3730  * variable argument wrapper for cmn_err.  prefixes the instance and port
3731  * number if possible
3732  */
3733 static void
3734 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3735 {
3736 	char port[NV_STRING_10];
3737 	char inst[NV_STRING_10];
3738 
3739 	mutex_enter(&nv_log_mutex);
3740 
3741 	if (nvc) {
3742 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3743 		    ddi_get_instance(nvc->nvc_dip));
3744 	} else {
3745 		inst[0] = '\0';
3746 	}
3747 
3748 	if (nvp) {
3749 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3750 	} else {
3751 		port[0] = '\0';
3752 	}
3753 
3754 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3755 	    (inst[0]|port[0] ? ": " :""));
3756 
3757 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3758 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3759 
3760 	/*
3761 	 * normally set to log to console but in some debug situations it
3762 	 * may be useful to log only to a file.
3763 	 */
3764 	if (nv_log_to_console) {
3765 		if (nv_prom_print) {
3766 			prom_printf("%s\n", nv_log_buf);
3767 		} else {
3768 			cmn_err(ce, "%s", nv_log_buf);
3769 		}
3770 
3771 
3772 	} else {
3773 		cmn_err(ce, "!%s", nv_log_buf);
3774 	}
3775 
3776 	mutex_exit(&nv_log_mutex);
3777 }
3778 
3779 
3780 /*
3781  * wrapper for cmn_err
3782  */
3783 static void
3784 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3785 {
3786 	va_list ap;
3787 
3788 	va_start(ap, fmt);
3789 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3790 	va_end(ap);
3791 }
3792 
3793 
#if defined(DEBUG)
/*
 * debug logging entry point.  Emits the message (prefixed with instance
 * and port number when available) only if one of the requested debug
 * flags is enabled in nv_debug_flags.
 */
static void
nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
{
	va_list ap;

	if (nv_debug_flags & flag) {
		va_start(ap, fmt);
		nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
		va_end(ap);

		/*
		 * optional pacing delay, useful for some debugging
		 * situations
		 */
		if (nv_log_delay) {
			drv_usecwait(nv_log_delay);
		}
	}
}
#endif /* DEBUG */
3820 
3821 
3822 /*
3823  * program registers which are common to all commands
3824  */
3825 static void
3826 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3827 {
3828 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3829 	sata_pkt_t *spkt;
3830 	sata_cmd_t *satacmd;
3831 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3832 	uint8_t cmd, ncq = B_FALSE;
3833 
3834 	spkt = nv_slotp->nvslot_spkt;
3835 	satacmd = &spkt->satapkt_cmd;
3836 	cmd = satacmd->satacmd_cmd_reg;
3837 
3838 	ASSERT(nvp->nvp_slot);
3839 
3840 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3841 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3842 		ncq = B_TRUE;
3843 	}
3844 
3845 	/*
3846 	 * select the drive
3847 	 */
3848 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3849 
3850 	/*
3851 	 * make certain the drive selected
3852 	 */
3853 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3854 	    NV_SEC2USEC(5), 0) == B_FALSE) {
3855 
3856 		return;
3857 	}
3858 
3859 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3860 
3861 	case ATA_ADDR_LBA:
3862 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3863 
3864 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3865 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3866 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3867 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3868 
3869 		break;
3870 
3871 	case ATA_ADDR_LBA28:
3872 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3873 		    "ATA_ADDR_LBA28 mode"));
3874 		/*
3875 		 * NCQ only uses 48-bit addressing
3876 		 */
3877 		ASSERT(ncq != B_TRUE);
3878 
3879 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3880 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3881 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3882 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3883 
3884 		break;
3885 
3886 	case ATA_ADDR_LBA48:
3887 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3888 		    "ATA_ADDR_LBA48 mode"));
3889 
3890 		/*
3891 		 * for NCQ, tag goes into count register and real sector count
3892 		 * into features register.  The sata module does the translation
3893 		 * in the satacmd.
3894 		 */
3895 		if (ncq == B_TRUE) {
3896 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3897 			nv_put8(cmdhdl, nvp->nvp_feature,
3898 			    satacmd->satacmd_features_reg_ext);
3899 			nv_put8(cmdhdl, nvp->nvp_feature,
3900 			    satacmd->satacmd_features_reg);
3901 		} else {
3902 			nv_put8(cmdhdl, nvp->nvp_count,
3903 			    satacmd->satacmd_sec_count_msb);
3904 			nv_put8(cmdhdl, nvp->nvp_count,
3905 			    satacmd->satacmd_sec_count_lsb);
3906 		}
3907 
3908 		/*
3909 		 * send the high-order half first
3910 		 */
3911 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3912 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3913 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3914 		/*
3915 		 * Send the low-order half
3916 		 */
3917 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3918 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3919 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3920 
3921 		break;
3922 
3923 	case 0:
3924 		/*
3925 		 * non-media access commands such as identify and features
3926 		 * take this path.
3927 		 */
3928 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3929 		nv_put8(cmdhdl, nvp->nvp_feature,
3930 		    satacmd->satacmd_features_reg);
3931 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3932 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3933 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3934 
3935 		break;
3936 
3937 	default:
3938 		break;
3939 	}
3940 
3941 	ASSERT(nvp->nvp_slot);
3942 }
3943 
3944 
3945 /*
3946  * start a command that involves no media access
3947  */
3948 static int
3949 nv_start_nodata(nv_port_t *nvp, int slot)
3950 {
3951 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3952 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3953 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3954 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3955 
3956 	nv_program_taskfile_regs(nvp, slot);
3957 
3958 	/*
3959 	 * This next one sets the controller in motion
3960 	 */
3961 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3962 
3963 	return (SATA_TRAN_ACCEPTED);
3964 }
3965 
3966 
3967 int
3968 nv_bm_status_clear(nv_port_t *nvp)
3969 {
3970 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3971 	uchar_t	status, ret;
3972 
3973 	/*
3974 	 * Get the current BM status
3975 	 */
3976 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3977 
3978 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3979 
3980 	/*
3981 	 * Clear the latches (and preserve the other bits)
3982 	 */
3983 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3984 
3985 	return (ret);
3986 }
3987 
3988 
3989 /*
3990  * program the bus master DMA engine with the PRD address for
3991  * the active slot command, and start the DMA engine.
3992  */
3993 static void
3994 nv_start_dma_engine(nv_port_t *nvp, int slot)
3995 {
3996 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3997 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3998 	uchar_t direction;
3999 
4000 	ASSERT(nv_slotp->nvslot_spkt != NULL);
4001 
4002 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4003 	    == SATA_DIR_READ) {
4004 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4005 	} else {
4006 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4007 	}
4008 
4009 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4010 	    "nv_start_dma_engine entered"));
4011 
4012 	/*
4013 	 * reset the controller's interrupt and error status bits
4014 	 */
4015 	(void) nv_bm_status_clear(nvp);
4016 
4017 	/*
4018 	 * program the PRD table physical start address
4019 	 */
4020 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4021 
4022 	/*
4023 	 * set the direction control and start the DMA controller
4024 	 */
4025 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4026 }
4027 
4028 /*
4029  * start dma command, either in or out
4030  */
4031 static int
4032 nv_start_dma(nv_port_t *nvp, int slot)
4033 {
4034 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4035 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4036 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4037 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4038 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4039 #ifdef NCQ
4040 	uint8_t ncq = B_FALSE;
4041 #endif
4042 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4043 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4044 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4045 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4046 
4047 	ASSERT(sg_count != 0);
4048 
4049 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4050 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4051 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4052 		    sata_cmdp->satacmd_num_dma_cookies);
4053 
4054 		return (NV_FAILURE);
4055 	}
4056 
4057 	nv_program_taskfile_regs(nvp, slot);
4058 
4059 	/*
4060 	 * start the drive in motion
4061 	 */
4062 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4063 
4064 	/*
4065 	 * the drive starts processing the transaction when the cmd register
4066 	 * is written.  This is done here before programming the DMA engine to
4067 	 * parallelize and save some time.  In the event that the drive is ready
4068 	 * before DMA, it will wait.
4069 	 */
4070 #ifdef NCQ
4071 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4072 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4073 		ncq = B_TRUE;
4074 	}
4075 #endif
4076 
4077 	/*
4078 	 * copy the PRD list to PRD table in DMA accessible memory
4079 	 * so that the controller can access it.
4080 	 */
4081 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4082 		uint32_t size;
4083 
4084 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4085 
4086 		/* Set the number of bytes to transfer, 0 implies 64KB */
4087 		size = srcp->dmac_size;
4088 		if (size == 0x10000)
4089 			size = 0;
4090 
4091 		/*
4092 		 * If this is a 40-bit address, copy bits 32-40 of the
4093 		 * physical address to bits 16-24 of the PRD count.
4094 		 */
4095 		if (srcp->dmac_laddress > UINT32_MAX) {
4096 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4097 		}
4098 
4099 		/*
4100 		 * set the end of table flag for the last entry
4101 		 */
4102 		if (idx == (sg_count - 1)) {
4103 			size |= PRDE_EOT;
4104 		}
4105 
4106 		nv_put32(sghdl, dstp++, size);
4107 	}
4108 
4109 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4110 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4111 
4112 	nv_start_dma_engine(nvp, slot);
4113 
4114 #ifdef NCQ
4115 	/*
4116 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4117 	 * command running.  Preliminary NCQ efforts indicated this needs
4118 	 * more debugging.
4119 	 *
4120 	 * if (nvp->nvp_ncq_run <= 1)
4121 	 */
4122 
4123 	if (ncq == B_FALSE) {
4124 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4125 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4126 		    " cmd = %X", non_ncq_commands++, cmd));
4127 		nv_start_dma_engine(nvp, slot);
4128 	} else {
4129 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4130 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4131 	}
4132 #endif /* NCQ */
4133 
4134 	return (SATA_TRAN_ACCEPTED);
4135 }
4136 
4137 
4138 /*
4139  * start a PIO data-in ATA command
4140  */
4141 static int
4142 nv_start_pio_in(nv_port_t *nvp, int slot)
4143 {
4144 
4145 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4146 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4147 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4148 
4149 	nv_program_taskfile_regs(nvp, slot);
4150 
4151 	/*
4152 	 * This next one sets the drive in motion
4153 	 */
4154 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4155 
4156 	return (SATA_TRAN_ACCEPTED);
4157 }
4158 
4159 
4160 /*
4161  * start a PIO data-out ATA command
4162  */
4163 static int
4164 nv_start_pio_out(nv_port_t *nvp, int slot)
4165 {
4166 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4167 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4168 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4169 
4170 	nv_program_taskfile_regs(nvp, slot);
4171 
4172 	/*
4173 	 * this next one sets the drive in motion
4174 	 */
4175 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4176 
4177 	/*
4178 	 * wait for the busy bit to settle
4179 	 */
4180 	NV_DELAY_NSEC(400);
4181 
4182 	/*
4183 	 * wait for the drive to assert DRQ to send the first chunk
4184 	 * of data. Have to busy wait because there's no interrupt for
4185 	 * the first chunk. This is bad... uses a lot of cycles if the
4186 	 * drive responds too slowly or if the wait loop granularity
4187 	 * is too large. It's even worse if the drive is defective and
4188 	 * the loop times out.
4189 	 */
4190 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4191 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4192 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4193 	    4000000, 0) == B_FALSE) {
4194 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4195 
4196 		goto error;
4197 	}
4198 
4199 	/*
4200 	 * send the first block.
4201 	 */
4202 	nv_intr_pio_out(nvp, nv_slotp);
4203 
4204 	/*
4205 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4206 	 * is OK so far, so return.  Otherwise, fall into error handling
4207 	 * below.
4208 	 */
4209 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4210 
4211 		return (SATA_TRAN_ACCEPTED);
4212 	}
4213 
4214 	error:
4215 	/*
4216 	 * there was an error so reset the device and complete the packet.
4217 	 */
4218 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4219 	nv_complete_io(nvp, spkt, 0);
4220 	nv_reset(nvp);
4221 
4222 	return (SATA_TRAN_PORT_ERROR);
4223 }
4224 
4225 
4226 /*
4227  * start a ATAPI Packet command (PIO data in or out)
4228  */
4229 static int
4230 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4231 {
4232 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4233 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4234 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4235 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4236 
4237 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4238 	    "nv_start_pkt_pio: start"));
4239 
4240 	/*
4241 	 * Write the PACKET command to the command register.  Normally
4242 	 * this would be done through nv_program_taskfile_regs().  It
4243 	 * is done here because some values need to be overridden.
4244 	 */
4245 
4246 	/* select the drive */
4247 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4248 
4249 	/* make certain the drive selected */
4250 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4251 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4252 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4253 		    "nv_start_pkt_pio: drive select failed"));
4254 		return (SATA_TRAN_PORT_ERROR);
4255 	}
4256 
4257 	/*
4258 	 * The command is always sent via PIO, despite whatever the SATA
4259 	 * framework sets in the command.  Overwrite the DMA bit to do this.
4260 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4261 	 */
4262 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4263 
4264 	/* set appropriately by the sata framework */
4265 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4266 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4267 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4268 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4269 
4270 	/* initiate the command by writing the command register last */
4271 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4272 
4273 	/* Give the host controller time to do its thing */
4274 	NV_DELAY_NSEC(400);
4275 
4276 	/*
4277 	 * Wait for the device to indicate that it is ready for the command
4278 	 * ATAPI protocol state - HP0: Check_Status_A
4279 	 */
4280 
4281 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4282 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4283 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4284 	    4000000, 0) == B_FALSE) {
4285 		/*
4286 		 * Either an error or device fault occurred or the wait
4287 		 * timed out.  According to the ATAPI protocol, command
4288 		 * completion is also possible.  Other implementations of
4289 		 * this protocol don't handle this last case, so neither
4290 		 * does this code.
4291 		 */
4292 
4293 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4294 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4295 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4296 
4297 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4298 			    "nv_start_pkt_pio: device error (HP0)"));
4299 		} else {
4300 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4301 
4302 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4303 			    "nv_start_pkt_pio: timeout (HP0)"));
4304 		}
4305 
4306 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4307 		nv_complete_io(nvp, spkt, 0);
4308 		nv_reset(nvp);
4309 
4310 		return (SATA_TRAN_PORT_ERROR);
4311 	}
4312 
4313 	/*
4314 	 * Put the ATAPI command in the data register
4315 	 * ATAPI protocol state - HP1: Send_Packet
4316 	 */
4317 
4318 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4319 	    (ushort_t *)nvp->nvp_data,
4320 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4321 
4322 	/*
4323 	 * See you in nv_intr_pkt_pio.
4324 	 * ATAPI protocol state - HP3: INTRQ_wait
4325 	 */
4326 
4327 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4328 	    "nv_start_pkt_pio: exiting into HP3"));
4329 
4330 	return (SATA_TRAN_ACCEPTED);
4331 }
4332 
4333 
4334 /*
4335  * Interrupt processing for a non-data ATA command.
4336  */
4337 static void
4338 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4339 {
4340 	uchar_t status;
4341 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4342 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4343 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4344 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4345 
4346 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4347 
4348 	status = nv_get8(cmdhdl, nvp->nvp_status);
4349 
4350 	/*
4351 	 * check for errors
4352 	 */
4353 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4354 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4355 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4356 		    nvp->nvp_altstatus);
4357 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4358 	} else {
4359 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4360 	}
4361 
4362 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4363 }
4364 
4365 
4366 /*
4367  * ATA command, PIO data in
4368  */
static void
nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t	status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * read the status register, which also clears the IRQ
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * BSY set at interrupt time is unexpected: fail the packet as
	 * a timeout, save the shadow registers for the framework, and
	 * reset the port.
	 */
	if (status & SATA_STATUS_BSY) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp);

		return;
	}

	/*
	 * check for errors: DRQ must be set and DF/ERR must both be
	 * clear for the data transfer to proceed
	 */
	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * read the next chunk of data (if any), at most one sector
	 * per interrupt
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read count bytes, as 16-bit words, from the data register
	 */
	ASSERT(count != 0);

	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	/* advance the buffer pointer and residual byte count */
	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;


	if (nv_slotp->nvslot_byte_count != 0) {
		/*
		 * more to transfer.  Wait for next interrupt.
		 */
		return;
	}

	/*
	 * transfer is complete. wait for the busy bit to settle.
	 */
	NV_DELAY_NSEC(400);

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
}
4436 
4437 
4438 /*
4439  * ATA command PIO data out
4440  */
static void
nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uchar_t status;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * clear the IRQ by reading the status register
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * this should not happen: the device should be ready at
		 * interrupt time.  Fail the packet as a timeout and save
		 * the shadow registers for the framework.
		 */
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);

		return;
	}

	/*
	 * check for errors (device fault or error bit set)
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer.  Likely that the transfer
	 * completed successfully, but check that byte_count is
	 * zero.
	 */
	if ((status & SATA_STATUS_DRQ) == 0) {

		if (nv_slotp->nvslot_byte_count == 0) {
			/*
			 * complete; successful transfer
			 */
			spkt->satapkt_reason = SATA_PKT_COMPLETED;
		} else {
			/*
			 * error condition, incomplete transfer
			 */
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		}
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		return;
	}

	/*
	 * write the next chunk of data, at most one sector per interrupt
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * write count bytes, as 16-bit words, to the data register
	 */

	ASSERT(count != 0);

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	/* advance the buffer pointer and residual byte count */
	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;
}
4522 
4523 
4524 /*
4525  * ATAPI PACKET command, PIO in/out interrupt
4526  *
4527  * Under normal circumstances, one of four different interrupt scenarios
4528  * will result in this function being called:
4529  *
4530  * 1. Packet command data transfer
4531  * 2. Packet command completion
4532  * 3. Request sense data transfer
4533  * 4. Request sense command completion
4534  */
static void
nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t	status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint16_t ctlr_count;
	int count;

	/* ATAPI protocol state - HP2: Check_Status_B */

	/* reading the status register also clears the IRQ */
	status = nv_get8(cmdhdl, nvp->nvp_status);
	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_intr_pkt_pio: status 0x%x", status));

	if (status & SATA_STATUS_BSY) {
		/*
		 * BSY at interrupt time is unexpected.  If a request
		 * sense was in progress, fail the original command as a
		 * device error; otherwise treat it as a timeout and
		 * reset the port.
		 */
		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;

			nv_reset(nvp);
		}

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: busy - status 0x%x", status));

		return;
	}

	if ((status & SATA_STATUS_DF) != 0) {
		/*
		 * On device fault, just clean up and bail.  Request sense
		 * will just default to its NO SENSE initialized value.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
		    nvp->nvp_error);

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: device fault"));

		return;
	}

	if ((status & SATA_STATUS_ERR) != 0) {
		/*
		 * On command error, figure out whether we are processing a
		 * request sense.  If so, clean up and bail.  Otherwise,
		 * do a REQUEST SENSE.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			/* mark the slot so the next interrupts are HP2/HP4
			 * for the request sense, not the original command */
			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
			    NV_FAILURE) {
				nv_copy_registers(nvp, &spkt->satapkt_device,
				    spkt);
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			}

			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
			    nvp->nvp_altstatus);
			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
			    nvp->nvp_error);
		} else {
			/* the request sense itself errored; give up */
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: error (status 0x%x)", status));

		return;
	}

	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
		/*
		 * REQUEST SENSE command processing
		 */

		if ((status & (SATA_STATUS_DRQ)) != 0) {
			/* ATAPI state - HP4: Transfer_Data */

			/*
			 * read the byte count from the controller; the
			 * cylinder high/low registers hold the length
			 */
			ctlr_count =
			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: ctlr byte count - %d",
			    ctlr_count));

			if (ctlr_count == 0) {
				/* no data to transfer - some devices do this */

				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: done (no data)"));

				return;
			}

			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);

			/* transfer the data */
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			/* consume residual bytes */
			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* drain extra words beyond the buffer size */
				for (; ctlr_count > 0; ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);
			}

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: transition to HP2"));
		} else {
			/* still in ATAPI state - HP2 */

			/*
			 * In order to avoid clobbering the rqsense data
			 * set by the SATA framework, the sense data read
			 * from the device is put in a separate buffer and
			 * copied into the packet after the request sense
			 * command successfully completes.
			 */
			bcopy(nv_slotp->nvslot_rqsense_buff,
			    spkt->satapkt_cmd.satacmd_rqsense,
			    SATA_ATAPI_RQSENSE_LEN);

			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: request sense done"));
		}

		return;
	}

	/*
	 * Normal command processing
	 */

	if ((status & (SATA_STATUS_DRQ)) != 0) {
		/* ATAPI protocol state - HP4: Transfer_Data */

		/* read the byte count from the controller */
		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

		if (ctlr_count == 0) {
			/* no data to transfer - some devices do this */

			spkt->satapkt_reason = SATA_PKT_COMPLETED;
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: done (no data)"));

			return;
		}

		count = min(ctlr_count, nv_slotp->nvslot_byte_count);

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: byte_count 0x%x",
		    nv_slotp->nvslot_byte_count));

		/* transfer the data */

		if (direction == SATA_DIR_READ) {
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* consume remaining bytes */

				for (; ctlr_count > 0;
				    ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);

				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: bytes remained"));
			}
		} else {
			ddi_rep_put16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);
		}

		/* advance the buffer pointer and residual byte count */
		nv_slotp->nvslot_v_addr += count;
		nv_slotp->nvslot_byte_count -= count;

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: transition to HP2"));
	} else {
		/* still in ATAPI state - HP2 */

		spkt->satapkt_reason = SATA_PKT_COMPLETED;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: done"));
	}
}
4775 
4776 
4777 /*
4778  * ATA command, DMA data in/out
4779  */
static void
nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t	bmicx;
	uchar_t bm_status;

	/* the slot is finished regardless of the outcome below */
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

	/*
	 * stop DMA engine.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);

	/*
	 * get the status and clear the IRQ, and check for DMA error
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for drive errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		/* clear the bus master status; the value is not needed */
		(void) nv_bm_status_clear(nvp);

		return;
	}

	bm_status = nv_bm_status_clear(nvp);

	/*
	 * check for bus master errors
	 */
	if (bm_status & BMISX_IDERR) {
		/* save the shadow registers and reset the port */
		spkt->satapkt_reason = SATA_PKT_RESET;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp);

		return;
	}

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
}
4833 
4834 
4835 /*
4836  * Wait for a register of a controller to achieve a specific state.
4837  * To return normally, all the bits in the first sub-mask must be ON,
4838  * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return FALSE (timeout), else TRUE.
4841  *
4842  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4843  * occur for the first 250 us, then switch over to a sleeping wait.
4844  *
4845  */
4846 int
4847 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4848     int type_wait)
4849 {
4850 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4851 	hrtime_t end, cur, start_sleep, start;
4852 	int first_time = B_TRUE;
4853 	ushort_t val;
4854 
4855 	for (;;) {
4856 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4857 
4858 		if ((val & onbits) == onbits && (val & offbits) == 0) {
4859 
4860 			return (B_TRUE);
4861 		}
4862 
4863 		cur = gethrtime();
4864 
4865 		/*
4866 		 * store the start time and calculate the end
4867 		 * time.  also calculate "start_sleep" which is
4868 		 * the point after which the driver will stop busy
4869 		 * waiting and change to sleep waiting.
4870 		 */
4871 		if (first_time) {
4872 			first_time = B_FALSE;
4873 			/*
4874 			 * start and end are in nanoseconds
4875 			 */
4876 			start = cur;
4877 			end = start + timeout_usec * 1000;
4878 			/*
4879 			 * add 1 ms to start
4880 			 */
4881 			start_sleep =  start + 250000;
4882 
4883 			if (servicing_interrupt()) {
4884 				type_wait = NV_NOSLEEP;
4885 			}
4886 		}
4887 
4888 		if (cur > end) {
4889 
4890 			break;
4891 		}
4892 
4893 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4894 #if ! defined(__lock_lint)
4895 			delay(1);
4896 #endif
4897 		} else {
4898 			drv_usecwait(nv_usec_delay);
4899 		}
4900 	}
4901 
4902 	return (B_FALSE);
4903 }
4904 
4905 
4906 /*
4907  * This is a slightly more complicated version that checks
4908  * for error conditions and bails-out rather than looping
4909  * until the timeout is exceeded.
4910  *
4911  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4912  * occur for the first 250 us, then switch over to a sleeping wait.
4913  */
4914 int
4915 nv_wait3(
4916 	nv_port_t	*nvp,
4917 	uchar_t		onbits1,
4918 	uchar_t		offbits1,
4919 	uchar_t		failure_onbits2,
4920 	uchar_t		failure_offbits2,
4921 	uchar_t		failure_onbits3,
4922 	uchar_t		failure_offbits3,
4923 	uint_t		timeout_usec,
4924 	int		type_wait)
4925 {
4926 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4927 	hrtime_t end, cur, start_sleep, start;
4928 	int first_time = B_TRUE;
4929 	ushort_t val;
4930 
4931 	for (;;) {
4932 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4933 
4934 		/*
4935 		 * check for expected condition
4936 		 */
4937 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4938 
4939 			return (B_TRUE);
4940 		}
4941 
4942 		/*
4943 		 * check for error conditions
4944 		 */
4945 		if ((val & failure_onbits2) == failure_onbits2 &&
4946 		    (val & failure_offbits2) == 0) {
4947 
4948 			return (B_FALSE);
4949 		}
4950 
4951 		if ((val & failure_onbits3) == failure_onbits3 &&
4952 		    (val & failure_offbits3) == 0) {
4953 
4954 			return (B_FALSE);
4955 		}
4956 
4957 		/*
4958 		 * store the start time and calculate the end
4959 		 * time.  also calculate "start_sleep" which is
4960 		 * the point after which the driver will stop busy
4961 		 * waiting and change to sleep waiting.
4962 		 */
4963 		if (first_time) {
4964 			first_time = B_FALSE;
4965 			/*
4966 			 * start and end are in nanoseconds
4967 			 */
4968 			cur = start = gethrtime();
4969 			end = start + timeout_usec * 1000;
4970 			/*
4971 			 * add 1 ms to start
4972 			 */
4973 			start_sleep =  start + 250000;
4974 
4975 			if (servicing_interrupt()) {
4976 				type_wait = NV_NOSLEEP;
4977 			}
4978 		} else {
4979 			cur = gethrtime();
4980 		}
4981 
4982 		if (cur > end) {
4983 
4984 			break;
4985 		}
4986 
4987 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4988 #if ! defined(__lock_lint)
4989 			delay(1);
4990 #endif
4991 		} else {
4992 			drv_usecwait(nv_usec_delay);
4993 		}
4994 	}
4995 
4996 	return (B_FALSE);
4997 }
4998 
4999 
5000 /*
5001  * nv_check_link() checks if a specified link is active device present
5002  * and communicating.
5003  */
5004 static boolean_t
5005 nv_check_link(uint32_t sstatus)
5006 {
5007 	uint8_t det;
5008 
5009 	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
5010 
5011 	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
5012 }
5013 
5014 
5015 /*
5016  * nv_port_state_change() reports the state of the port to the
5017  * sata module by calling sata_hba_event_notify().  This
5018  * function is called any time the state of the port is changed
5019  */
5020 static void
5021 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5022 {
5023 	sata_device_t sd;
5024 
5025 	bzero((void *)&sd, sizeof (sata_device_t));
5026 	sd.satadev_rev = SATA_DEVICE_REV;
5027 	nv_copy_registers(nvp, &sd, NULL);
5028 
5029 	/*
5030 	 * When NCQ is implemented sactive and snotific field need to be
5031 	 * updated.
5032 	 */
5033 	sd.satadev_addr.cport = nvp->nvp_port_num;
5034 	sd.satadev_addr.qual = addr_type;
5035 	sd.satadev_state = state;
5036 
5037 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5038 }
5039 
5040 
5041 /*
5042  * timeout processing:
5043  *
5044  * Check if any packets have crossed a timeout threshold.  If so, then
5045  * abort the packet.  This function is not NCQ aware.
5046  *
5047  * If reset was invoked in any other place than nv_sata_probe(), then
5048  * monitor for reset completion here.
5049  *
5050  */
static void
nv_timeout(void *arg)
{
	nv_port_t *nvp = arg;
	nv_slot_t *nv_slotp;
	int restart_timeout = B_FALSE;

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * If the probe entry point is driving the reset and signature
	 * acquisition, just return.
	 */
	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
		goto finished;
	}

	/*
	 * If the port is not in the init state, it likely
	 * means the link was lost while a timeout was active.
	 */
	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
		    "nv_timeout: port uninitialized"));

		goto finished;
	}

	if (nvp->nvp_state & NV_PORT_RESET) {
		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
		uint32_t sstatus;

		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
		    "nv_timeout(): port waiting for signature"));

		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

		/*
		 * check for link presence.  If the link remains
		 * missing for more than 2 seconds, send a remove
		 * event and abort signature acquisition.
		 */
		if (nv_check_link(sstatus) == B_FALSE) {
			clock_t e_link_lost = ddi_get_lbolt();

			/* remember when the link was first seen missing */
			if (nvp->nvp_link_lost_time == 0) {
				nvp->nvp_link_lost_time = e_link_lost;
			}
			if (TICK_TO_SEC(e_link_lost -
			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
				    "probe: intermittent link lost while"
				    " resetting"));
				restart_timeout = B_TRUE;
			} else {
				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
				    "link lost during signature acquisition."
				    "  Giving up"));
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_DETACHED|
				    SATA_EVNT_LINK_LOST,
				    SATA_ADDR_CPORT, 0);
				nvp->nvp_state |= NV_PORT_HOTREMOVED;
				nvp->nvp_state &= ~NV_PORT_RESET;
			}

			goto finished;
		} else {

			nvp->nvp_link_lost_time = 0;
		}

		nv_read_signature(nvp);

		/* a non-zero signature means the device identified itself */
		if (nvp->nvp_signature != 0) {
			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
				nvp->nvp_state |= NV_PORT_RESTORE;
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_RESET,
				    SATA_ADDR_DCPORT,
				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
			}

			goto finished;
		}

		/*
		 * Reset if more than 5 seconds has passed without
		 * acquiring a signature.
		 */
		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
			nv_reset(nvp);
		}

		restart_timeout = B_TRUE;
		goto finished;
	}


	/*
	 * not yet NCQ aware
	 */
	nv_slotp = &(nvp->nvp_slot[0]);

	/*
	 * this happens early on before nv_slotp is set
	 * up OR when a device was unexpectedly removed and
	 * there was an active packet.
	 */
	if (nv_slotp == NULL) {
		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
		    "nv_timeout: nv_slotp == NULL"));

		goto finished;
	}

	/*
	 * perform timeout checking and processing only if there is an
	 * active packet on the port
	 */
	if (nv_slotp->nvslot_spkt != NULL)  {
		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
		uint8_t cmd = satacmd->satacmd_cmd_reg;
		uint64_t lba;

#if ! defined(__lock_lint) && defined(DEBUG)

		/* lba is only consumed by the NVLOG debug output below */
		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
#endif

		/*
		 * timeout not needed if there is a polling thread
		 */
		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {

			goto finished;
		}

		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
		    spkt->satapkt_time) {
			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "abort timeout: "
			    "nvslot_stime: %ld max ticks till timeout: "
			    "%ld cur_time: %ld cmd=%x lba=%d",
			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));

			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);

		} else {
			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
			    " still in use so restarting timeout"));
		}
		restart_timeout = B_TRUE;

	} else {
		/*
		 * there was no active packet, so do not re-enable timeout
		 */
		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
		    "nv_timeout: no active packet so not re-arming timeout"));
	}

	finished:

	/* re-arm the 1 second timeout only if requested above */
	if (restart_timeout == B_TRUE) {
		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
		    drv_usectohz(NV_ONE_SEC));
	} else {
		nvp->nvp_timeout_id = 0;
	}
	mutex_exit(&nvp->nvp_mutex);
}
5231 
5232 
5233 /*
5234  * enable or disable the 3 interrupt types the driver is
5235  * interested in: completion, add and remove.
5236  */
static void
ck804_set_intr(nv_port_t *nvp, int flag)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5  = nvc->nvc_bar_addr[5];
	/* per-channel (primary/secondary) hotplug + completion enable bits */
	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
	uint8_t int_en, port = nvp->nvp_port_num, intr_status;

	/*
	 * non-blocking disable: performed without acquiring any mutex,
	 * for callers that cannot block.
	 */
	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
		return;
	}

	ASSERT(mutex_owned(&nvp->nvp_mutex));

	/*
	 * controller level lock also required since access to an 8-bit
	 * interrupt register is shared between both channels.
	 */
	mutex_enter(&nvc->nvc_mutex);

	if (flag & NV_INTR_CLEAR_ALL) {
		NVLOG((NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));

		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
		    (uint8_t *)(nvc->nvc_ck804_int_status));

		/* clear this port's status bits if any are asserted */
		if (intr_status & clear_all_bits[port]) {

			nv_put8(nvc->nvc_bar_hdl[5],
			    (uint8_t *)(nvc->nvc_ck804_int_status),
			    clear_all_bits[port]);

			NVLOG((NVDBG_INTR, nvc, nvp,
			    "interrupt bits cleared %x",
			    intr_status & clear_all_bits[port]));
		}
	}

	if (flag & NV_INTR_DISABLE) {
		NVLOG((NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_DISABLE"));
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	if (flag & NV_INTR_ENABLE) {
		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en |= intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	mutex_exit(&nvc->nvc_mutex);
}
5305 
5306 
5307 /*
5308  * enable or disable the 3 interrupts the driver is interested in:
5309  * completion interrupt, hot add, and hot remove interrupt.
5310  */
static void
mcp5x_set_intr(nv_port_t *nvp, int flag)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	/* hot add, hot remove and command completion interrupt bits */
	uint16_t intr_bits =
	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
	uint16_t int_en;

	/*
	 * non-blocking disable: performed without acquiring the port
	 * mutex, for callers that cannot block.
	 */
	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
		int_en &= ~intr_bits;
		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
		return;
	}

	ASSERT(mutex_owned(&nvp->nvp_mutex));

	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));

	if (flag & NV_INTR_CLEAR_ALL) {
		NVLOG((NVDBG_INTR, nvc, nvp,
		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
	}

	if (flag & NV_INTR_ENABLE) {
		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
		int_en |= intr_bits;
		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
	}

	if (flag & NV_INTR_DISABLE) {
		NVLOG((NVDBG_INTR, nvc, nvp,
		    "mcp5x_set_intr: NV_INTR_DISABLE"));
		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
		int_en &= ~intr_bits;
		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
	}
}
5352 
5353 
/*
 * nv_resume
 * Bring a suspended port back up: reconnect the SGPIO LED state (when
 * compiled in), re-enable the port interrupts and force a reset so that
 * whatever is now attached is probed again.  No-op for inactive ports.
 */
static void
nv_resume(nv_port_t *nvp)
{
	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));

	mutex_enter(&nvp->nvp_mutex);

	/* nothing to resume on an inactive port */
	if (nvp->nvp_state & NV_PORT_INACTIVE) {
		mutex_exit(&nvp->nvp_mutex);
		return;
	}

#ifdef SGPIO_SUPPORT
	/* tell the LED taskq this drive slot is attached again */
	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
#endif

	/* Enable interrupt */
	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);

	/*
	 * power may have been removed to the port and the
	 * drive, and/or a drive may have been added or removed.
	 * Force a reset which will cause a probe and re-establish
	 * any state needed on the drive.
	 */
	nv_reset(nvp);

	mutex_exit(&nvp->nvp_mutex);
}
5384 
5385 
/*
 * nv_suspend
 * Quiesce a port for suspend: disconnect the SGPIO LED state (when
 * compiled in), stop the port timeout handler and disable the port
 * interrupts.  The timeout handler is restarted by nv_reset() during
 * nv_resume().
 *
 * NOTE(review): the SGPIO disconnect happens before the NV_PORT_INACTIVE
 * check, whereas nv_resume() checks first — presumably turning the LED
 * off for an inactive port is harmless; confirm the asymmetry is
 * intentional.
 */
static void
nv_suspend(nv_port_t *nvp)
{
	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));

	mutex_enter(&nvp->nvp_mutex);

#ifdef SGPIO_SUPPORT
	/* tell the LED taskq this drive slot is going away */
	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
#endif

	if (nvp->nvp_state & NV_PORT_INACTIVE) {
		mutex_exit(&nvp->nvp_mutex);
		return;
	}

	/*
	 * Stop the timeout handler.
	 * (It will be restarted in nv_reset() during nv_resume().)
	 */
	if (nvp->nvp_timeout_id) {
		(void) untimeout(nvp->nvp_timeout_id);
		nvp->nvp_timeout_id = 0;
	}

	/* Disable interrupt */
	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);

	mutex_exit(&nvp->nvp_mutex);
}
5418 
5419 
5420 static void
5421 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5422 {
5423 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5424 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5425 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5426 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5427 	uchar_t status;
5428 	struct sata_cmd_flags flags;
5429 
5430 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5431 
5432 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5433 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5434 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5435 
5436 	if (spkt == NULL) {
5437 
5438 		return;
5439 	}
5440 
5441 	/*
5442 	 * in the error case, implicitly set the return of regs needed
5443 	 * for error handling.
5444 	 */
5445 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5446 	    nvp->nvp_altstatus);
5447 
5448 	flags = scmd->satacmd_flags;
5449 
5450 	if (status & SATA_STATUS_ERR) {
5451 		flags.sata_copy_out_lba_low_msb = B_TRUE;
5452 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5453 		flags.sata_copy_out_lba_high_msb = B_TRUE;
5454 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5455 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5456 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5457 		flags.sata_copy_out_error_reg = B_TRUE;
5458 		flags.sata_copy_out_sec_count_msb = B_TRUE;
5459 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5460 		scmd->satacmd_status_reg = status;
5461 	}
5462 
5463 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5464 
5465 		/*
5466 		 * set HOB so that high byte will be read
5467 		 */
5468 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5469 
5470 		/*
5471 		 * get the requested high bytes
5472 		 */
5473 		if (flags.sata_copy_out_sec_count_msb) {
5474 			scmd->satacmd_sec_count_msb =
5475 			    nv_get8(cmdhdl, nvp->nvp_count);
5476 		}
5477 
5478 		if (flags.sata_copy_out_lba_low_msb) {
5479 			scmd->satacmd_lba_low_msb =
5480 			    nv_get8(cmdhdl, nvp->nvp_sect);
5481 		}
5482 
5483 		if (flags.sata_copy_out_lba_mid_msb) {
5484 			scmd->satacmd_lba_mid_msb =
5485 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5486 		}
5487 
5488 		if (flags.sata_copy_out_lba_high_msb) {
5489 			scmd->satacmd_lba_high_msb =
5490 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5491 		}
5492 	}
5493 
5494 	/*
5495 	 * disable HOB so that low byte is read
5496 	 */
5497 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5498 
5499 	/*
5500 	 * get the requested low bytes
5501 	 */
5502 	if (flags.sata_copy_out_sec_count_lsb) {
5503 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5504 	}
5505 
5506 	if (flags.sata_copy_out_lba_low_lsb) {
5507 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5508 	}
5509 
5510 	if (flags.sata_copy_out_lba_mid_lsb) {
5511 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5512 	}
5513 
5514 	if (flags.sata_copy_out_lba_high_lsb) {
5515 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5516 	}
5517 
5518 	/*
5519 	 * get the device register if requested
5520 	 */
5521 	if (flags.sata_copy_out_device_reg) {
5522 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5523 	}
5524 
5525 	/*
5526 	 * get the error register if requested
5527 	 */
5528 	if (flags.sata_copy_out_error_reg) {
5529 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5530 	}
5531 }
5532 
5533 
5534 /*
5535  * Hot plug and remove interrupts can occur when the device is reset.  Just
5536  * masking the interrupt doesn't always work well because if a
5537  * different interrupt arrives on the other port, the driver can still
5538  * end up checking the state of the other port and discover the hot
5539  * interrupt flag is set even though it was masked.  Checking for recent
5540  * reset activity and then ignoring turns out to be the easiest way.
5541  */
static void
nv_report_add_remove(nv_port_t *nvp, int flags)
{
	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
	/* ticks elapsed since the last reset on this port */
	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
	uint32_t sstatus;
	int i;

	/*
	 * If reset within last 1 second ignore.  This should be
	 * reworked and improved instead of having this somewhat
	 * heavy handed clamping job.
	 */
	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
		    "ignoring plug interrupt was %dms ago",
		    TICK_TO_MSEC(time_diff)));

		return;
	}

	/*
	 * wait up to 1ms for sstatus to settle and reflect the true
	 * status of the port.  Failure to do so can create confusion
	 * in probe, where the incorrect sstatus value can still
	 * persist.
	 */
	for (i = 0; i < 1000; i++) {
		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

		/*
		 * for a removal, wait until DET no longer reports a
		 * device present with PHY communication established
		 */
		if ((flags == NV_PORT_HOTREMOVED) &&
		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
		    SSTATUS_DET_DEVPRE_PHYCOM)) {
			break;
		}

		/*
		 * for an add, wait until DET reports device present
		 * with PHY communication established
		 */
		if ((flags != NV_PORT_HOTREMOVED) &&
		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
		    SSTATUS_DET_DEVPRE_PHYCOM)) {
			break;
		}
		drv_usecwait(1);
	}

	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));

	/* report the event to the sata framework and record it locally */
	if (flags == NV_PORT_HOTREMOVED) {
		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
		    "nv_report_add_remove() hot removed"));
		nv_port_state_change(nvp,
		    SATA_EVNT_DEVICE_DETACHED,
		    SATA_ADDR_CPORT, 0);

		nvp->nvp_state |= NV_PORT_HOTREMOVED;
	} else {
		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
		    "nv_report_add_remove() hot plugged"));
		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
		    SATA_ADDR_CPORT, 0);
	}
}
5604 
5605 /*
5606  * Get request sense data and stuff it the command's sense buffer.
5607  * Start a request sense command in order to get sense data to insert
5608  * in the sata packet's rqsense buffer.  The command completion
5609  * processing is in nv_intr_pkt_pio.
5610  *
5611  * The sata framework provides a function to allocate and set-up a
5612  * request sense packet command. The reasons it is not being used here is:
5613  * a) it cannot be called in an interrupt context and this function is
5614  *    called in an interrupt context.
5615  * b) it allocates DMA resources that are not used here because this is
5616  *    implemented using PIO.
5617  *
5618  * If, in the future, this is changed to use DMA, the sata framework should
5619  * be used to allocate and set-up the error retrieval (request sense)
5620  * command.
5621  */
/*
 * Returns NV_SUCCESS once the PACKET command has been accepted and the
 * REQUEST SENSE CDB transferred (completion is handled by
 * nv_intr_pkt_pio); returns NV_FAILURE if the drive cannot be selected
 * or does not enter the data-request phase, in which case the packet is
 * completed here and the port is reset.
 */
static int
nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;

	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: start"));

	/* clear the local request sense buffer before starting the command */
	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);

	/* Write the request sense PACKET command */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_rqsense_pio: drive select failed"));
		return (NV_FAILURE);
	}

	/* set up the command */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
	/* byte count limit for PIO data-in, split across cyl high/low */
	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
	nv_put8(cmdhdl, nvp->nvp_sect, 0);
	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);

	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
		} else {
			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
		}

		/* fail the original packet and recover the port */
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp);

		return (NV_FAILURE);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
	    (ushort_t *)nvp->nvp_data,
	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: exiting into HP3"));

	return (NV_SUCCESS);
}
5701 
5702 /*
5703  * quiesce(9E) entry point.
5704  *
5705  * This function is called when the system is single-threaded at high
5706  * PIL with preemption disabled. Therefore, this function must not be
5707  * blocked.
5708  *
5709  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5710  * DDI_FAILURE indicates an error condition and should almost never happen.
5711  */
static int
nv_quiesce(dev_info_t *dip)
{
	int port, instance = ddi_get_instance(dip);
	nv_ctl_t *nvc;

	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
		return (DDI_FAILURE);

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		nv_port_t *nvp = &(nvc->nvc_port[port]);
		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
		uint32_t sctrl;

		/*
		 * Stop the controllers from generating interrupts.
		 * The non-blocking variant is used because quiesce runs
		 * single-threaded at high PIL and must not acquire locks.
		 */
		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);

		/*
		 * clear signature registers
		 */
		nv_put8(cmdhdl, nvp->nvp_sect, 0);
		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
		nv_put8(cmdhdl, nvp->nvp_count, 0);

		/* reset the soft state so a later attach re-probes cleanly */
		nvp->nvp_signature = 0;
		nvp->nvp_type = 0;
		nvp->nvp_state |= NV_PORT_RESET;
		nvp->nvp_reset_time = ddi_get_lbolt();
		nvp->nvp_link_lost_time = 0;

		/*
		 * assert reset in PHY by writing a 1 to bit 0 scontrol
		 */
		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);

		nv_put32(bar5_hdl, nvp->nvp_sctrl,
		    sctrl | SCONTROL_DET_COMRESET);

		/*
		 * wait 1ms
		 */
		drv_usecwait(1000);

		/*
		 * de-assert reset in PHY
		 */
		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
	}

	return (DDI_SUCCESS);
}
5767 
5768 
5769 #ifdef SGPIO_SUPPORT
5770 /*
5771  * NVIDIA specific SGPIO LED support
5772  * Please refer to the NVIDIA documentation for additional details
5773  */
5774 
5775 /*
5776  * nv_sgp_led_init
5777  * Detect SGPIO support.  If present, initialize.
5778  */
5779 static void
5780 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5781 {
5782 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5783 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5784 	nv_sgp_cmn_t *cmn;	/* shared data structure */
5785 	int i;
5786 	char tqname[SGPIO_TQ_NAME_LEN];
5787 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5788 
5789 	/*
5790 	 * Initialize with appropriately invalid values in case this function
5791 	 * exits without initializing SGPIO (for example, there is no SGPIO
5792 	 * support).
5793 	 */
5794 	nvc->nvc_sgp_csr = 0;
5795 	nvc->nvc_sgp_cbp = NULL;
5796 
5797 	/*
5798 	 * Only try to initialize SGPIO LED support if this property
5799 	 * indicates it should be.
5800 	 */
5801 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
5802 	    "enable-sgpio-leds", 0) != 1)
5803 		return;
5804 
5805 	/*
5806 	 * CK804 can pass the sgpio_detect test even though it does not support
5807 	 * SGPIO, so don't even look at a CK804.
5808 	 */
5809 	if (nvc->nvc_mcp5x_flag != B_TRUE)
5810 		return;
5811 
5812 	/*
5813 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5814 	 * However, the current implementation only supports 4 drives.
5815 	 * With two drives per controller, that means only look at the
5816 	 * first two controllers.
5817 	 */
5818 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5819 		return;
5820 
5821 	/* confirm that the SGPIO registers are there */
5822 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5823 		NVLOG((NVDBG_INIT, nvc, NULL,
5824 		    "SGPIO registers not detected"));
5825 		return;
5826 	}
5827 
5828 	/* save off the SGPIO_CSR I/O address */
5829 	nvc->nvc_sgp_csr = csrp;
5830 
5831 	/* map in Control Block */
5832 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5833 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5834 
5835 	/* initialize the SGPIO h/w */
5836 	if (nv_sgp_init(nvc) == NV_FAILURE) {
5837 		nv_cmn_err(CE_WARN, nvc, NULL,
5838 		    "!Unable to initialize SGPIO");
5839 	}
5840 
5841 	/*
5842 	 * Initialize the shared space for this instance.  This could
5843 	 * involve allocating the space, saving a pointer to the space
5844 	 * and starting the taskq that actually turns the LEDs on and off.
5845 	 * Or, it could involve just getting the pointer to the already
5846 	 * allocated space.
5847 	 */
5848 
5849 	mutex_enter(&nv_sgp_c2c_mutex);
5850 
5851 	/* try and find our CBP in the mapping table */
5852 	cmn = NULL;
5853 	for (i = 0; i < NV_MAX_CBPS; i++) {
5854 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
5855 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
5856 			break;
5857 		}
5858 
5859 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
5860 			break;
5861 	}
5862 
5863 	if (i >= NV_MAX_CBPS) {
5864 		/*
5865 		 * CBP to shared space mapping table is full
5866 		 */
5867 		nvc->nvc_sgp_cmn = NULL;
5868 		nv_cmn_err(CE_WARN, nvc, NULL,
5869 		    "!LED handling not initialized - too many controllers");
5870 	} else if (cmn == NULL) {
5871 		/*
5872 		 * Allocate the shared space, point the SGPIO scratch register
5873 		 * at it and start the led update taskq.
5874 		 */
5875 
5876 		/* allocate shared space */
5877 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5878 		    KM_SLEEP);
5879 		if (cmn == NULL) {
5880 			nv_cmn_err(CE_WARN, nvc, NULL,
5881 			    "!Failed to allocate shared data");
5882 			return;
5883 		}
5884 
5885 		nvc->nvc_sgp_cmn = cmn;
5886 
5887 		/* initialize the shared data structure */
5888 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5889 		cmn->nvs_connected = 0;
5890 		cmn->nvs_activity = 0;
5891 		cmn->nvs_cbp = cbp;
5892 
5893 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5894 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5895 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5896 
5897 		/* put the address in the SGPIO scratch register */
5898 #if defined(__amd64)
5899 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5900 #else
5901 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5902 #endif
5903 
5904 		/* add an entry to the cbp to cmn mapping table */
5905 
5906 		/* i should be the next available table position */
5907 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
5908 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
5909 
5910 		/* start the activity LED taskq */
5911 
5912 		/*
5913 		 * The taskq name should be unique and the time
5914 		 */
5915 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5916 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5917 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5918 		    TASKQ_DEFAULTPRI, 0);
5919 		if (cmn->nvs_taskq == NULL) {
5920 			cmn->nvs_taskq_delay = 0;
5921 			nv_cmn_err(CE_WARN, nvc, NULL,
5922 			    "!Failed to start activity LED taskq");
5923 		} else {
5924 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5925 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5926 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5927 		}
5928 	} else {
5929 		nvc->nvc_sgp_cmn = cmn;
5930 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5931 	}
5932 
5933 	mutex_exit(&nv_sgp_c2c_mutex);
5934 }
5935 
5936 /*
5937  * nv_sgp_detect
5938  * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
5939  * report back whether both were readable.
5940  */
5941 static int
5942 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5943     uint32_t *cbpp)
5944 {
5945 	/* get the SGPIO_CSRP */
5946 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5947 	if (*csrpp == 0) {
5948 		return (NV_FAILURE);
5949 	}
5950 
5951 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5952 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5953 	if (*cbpp == 0) {
5954 		return (NV_FAILURE);
5955 	}
5956 
5957 	/* SGPIO_CBP is good, so we must support SGPIO */
5958 	return (NV_SUCCESS);
5959 }
5960 
5961 /*
5962  * nv_sgp_init
5963  * Initialize SGPIO.
5964  * The initialization process is described by NVIDIA, but the hardware does
5965  * not always behave as documented, so several steps have been changed and/or
5966  * omitted.
5967  */
/*
 * Returns NV_SUCCESS unless the READ_PARAMS command issued to bring the
 * SGPIO logic out of reset completes with a command error.  A timeout
 * waiting for the command, or a post-init state other than OPERATIONAL,
 * is logged but deliberately not treated as failure (see comments below).
 */
static int
nv_sgp_init(nv_ctl_t *nvc)
{
	int seq;
	int rval = NV_SUCCESS;
	hrtime_t start, end;
	uint32_t cmd;
	uint32_t status;
	int drive_count;

	status = nv_sgp_csr_read(nvc);
	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
		/* SGPIO logic is in reset state and requires initialization */

		/* noting the Sequence field value */
		seq = SGPIO_CSR_SEQ(status);

		/* issue SGPIO_CMD_READ_PARAMS command */
		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
		nv_sgp_csr_write(nvc, cmd);

		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);

		/* poll for command completion */
		start = gethrtime();
		end = start + NV_SGP_CMD_TIMEOUT;
		for (;;) {
			status = nv_sgp_csr_read(nvc);

			/* break on error */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
				NVLOG((NVDBG_ALWAYS, nvc, NULL,
				    "Command error during initialization"));
				rval = NV_FAILURE;
				break;
			}

			/* command processing is taking place */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
				/*
				 * a completed command should have bumped
				 * the sequence number; log if it didn't
				 */
				if (SGPIO_CSR_SEQ(status) != seq) {
					NVLOG((NVDBG_ALWAYS, nvc, NULL,
					    "Sequence number change error"));
				}

				break;
			}

			/* if completion not detected in 2000ms ... */

			if (gethrtime() > end)
				break;

			/* wait 400 ns before checking again */
			NV_DELAY_NSEC(400);
		}
	}

	if (rval == NV_FAILURE)
		return (rval);

	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
		NVLOG((NVDBG_ALWAYS, nvc, NULL,
		    "SGPIO logic not operational after init - state %d",
		    SGPIO_CSR_SSTAT(status)));
		/*
		 * Should return (NV_FAILURE) but the hardware can be
		 * operational even if the SGPIO Status does not indicate
		 * this.
		 */
	}

	/*
	 * NVIDIA recommends reading the supported drive count even
	 * though they also indicate that it is always 4 at this time.
	 */
	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
	if (drive_count != SGPIO_DRV_CNT_VALUE) {
		NVLOG((NVDBG_INIT, nvc, NULL,
		    "SGPIO reported undocumented drive count - %d",
		    drive_count));
	}

	NVLOG((NVDBG_INIT, nvc, NULL,
	    "initialized ctlr: %d csr: 0x%08x",
	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));

	return (rval);
}
6056 
6057 static int
6058 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6059 {
6060 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6061 
6062 	if (cmn == NULL)
6063 		return (NV_FAILURE);
6064 
6065 	mutex_enter(&cmn->nvs_slock);
6066 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6067 	mutex_exit(&cmn->nvs_slock);
6068 
6069 	return (NV_SUCCESS);
6070 }
6071 
6072 /*
6073  * nv_sgp_csr_read
6074  * This is just a 32-bit port read from the value that was obtained from the
6075  * PCI config space.
6076  *
6077  * XXX It was advised to use the in[bwl] function for this, even though they
6078  * are obsolete interfaces.
6079  */
static int
nv_sgp_csr_read(nv_ctl_t *nvc)
{
	/* nvc_sgp_csr is the I/O port number saved from SGPIO_CSRP */
	return (inl(nvc->nvc_sgp_csr));
}
6085 
6086 /*
6087  * nv_sgp_csr_write
6088  * This is just a 32-bit I/O port write.  The port number was obtained from
6089  * the PCI config space.
6090  *
6091  * XXX It was advised to use the out[bwl] function for this, even though they
6092  * are obsolete interfaces.
6093  */
static void
nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
{
	/* nvc_sgp_csr is the I/O port number saved from SGPIO_CSRP */
	outl(nvc->nvc_sgp_csr, val);
}
6099 
6100 /*
6101  * nv_sgp_write_data
6102  * Cause SGPIO to send Control Block data
6103  */
/*
 * Returns NV_SUCCESS if the WRITE_DATA command completed OK within
 * NV_SGP_CMD_TIMEOUT, otherwise NV_FAILURE (command error or timeout).
 */
static int
nv_sgp_write_data(nv_ctl_t *nvc)
{
	hrtime_t start, end;
	uint32_t status;
	uint32_t cmd;

	/* issue command */
	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
	nv_sgp_csr_write(nvc, cmd);

	/* poll for completion */
	start = gethrtime();
	end = start + NV_SGP_CMD_TIMEOUT;
	for (;;) {
		status = nv_sgp_csr_read(nvc);

		/* break on error completion */
		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
			break;

		/* break on successful completion */
		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
			break;

		/* Wait 400 ns and try again */
		NV_DELAY_NSEC(400);

		if (gethrtime() > end)
			break;
	}

	/*
	 * note: on a timeout break, this tests the status value read
	 * just before the final 400 ns delay
	 */
	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
		return (NV_SUCCESS);

	return (NV_FAILURE);
}
6141 
6142 /*
6143  * nv_sgp_activity_led_ctl
6144  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6145  * see if any of the activity LEDs need to be changed.
6146  */
/*
 * The taskq loop exits when nvs_taskq_delay drops to zero (set by
 * nv_sgp_cleanup(), which also broadcasts nvs_cv to wake this thread).
 */
static void
nv_sgp_activity_led_ctl(void *arg)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg;
	nv_sgp_cmn_t *cmn;
	volatile nv_sgp_cb_t *cbp;
	clock_t ticks;
	uint8_t drv_leds;
	uint32_t old_leds;
	uint32_t new_led_state;
	int i;

	cmn = nvc->nvc_sgp_cmn;
	cbp = nvc->nvc_sgp_cbp;

	do {
		/* save off the old state of all of the LEDs */
		old_leds = cbp->sgpio0_tr;

		DTRACE_PROBE3(sgpio__activity__state,
		    int, cmn->nvs_connected, int, cmn->nvs_activity,
		    int, old_leds);

		new_led_state = 0;

		/* for each drive */
		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {

			/* get the current state of the LEDs for the drive */
			drv_leds = SGPIO0_TR_DRV(old_leds, i);

			if ((cmn->nvs_connected & (1 << i)) == 0) {
				/* if not connected, turn off activity */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			if ((cmn->nvs_activity & (1 << i)) == 0) {
				/* connected, but not active */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			/* connected and active: toggle to make the LED blink */
			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
				/* was enabled, so disable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |=
				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			} else {
				/* was disabled, so enable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			}

			/*
			 * clear the activity bit
			 * if there is drive activity again within the
			 * loop interval (now 1/16 second), nvs_activity
			 * will be reset and the "connected and active"
			 * condition above will cause the LED to blink
			 * off and on at the loop interval rate.  The
			 * rate may be increased (interval shortened) as
			 * long as it is not more than 1/30 second.
			 */
			mutex_enter(&cmn->nvs_slock);
			cmn->nvs_activity &= ~(1 << i);
			mutex_exit(&cmn->nvs_slock);
		}

		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);

		/* write out LED values */

		mutex_enter(&cmn->nvs_slock);
		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
		cbp->sgpio0_tr |= new_led_state;
		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		mutex_exit(&cmn->nvs_slock);

		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
			NVLOG((NVDBG_VERBOSE, nvc, NULL,
			    "nv_sgp_write_data failure updating active LED"));
		}

		/* now rest for the interval */
		mutex_enter(&cmn->nvs_tlock);
		ticks = drv_usectohz(cmn->nvs_taskq_delay);
		if (ticks > 0)
			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
			    ddi_get_lbolt() + ticks);
		mutex_exit(&cmn->nvs_tlock);
	} while (ticks > 0);
}
6261 
6262 /*
6263  * nv_sgp_drive_connect
6264  * Set the flag used to indicate that the drive is attached to the HBA.
6265  * Used to let the taskq know that it should turn the Activity LED on.
6266  */
6267 static void
6268 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6269 {
6270 	nv_sgp_cmn_t *cmn;
6271 
6272 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6273 		return;
6274 	cmn = nvc->nvc_sgp_cmn;
6275 
6276 	mutex_enter(&cmn->nvs_slock);
6277 	cmn->nvs_connected |= (1 << drive);
6278 	mutex_exit(&cmn->nvs_slock);
6279 }
6280 
6281 /*
6282  * nv_sgp_drive_disconnect
6283  * Clears the flag used to indicate that the drive is no longer attached
6284  * to the HBA.  Used to let the taskq know that it should turn the
6285  * Activity LED off.  The flag that indicates that the drive is in use is
6286  * also cleared.
6287  */
6288 static void
6289 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6290 {
6291 	nv_sgp_cmn_t *cmn;
6292 
6293 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6294 		return;
6295 	cmn = nvc->nvc_sgp_cmn;
6296 
6297 	mutex_enter(&cmn->nvs_slock);
6298 	cmn->nvs_connected &= ~(1 << drive);
6299 	cmn->nvs_activity &= ~(1 << drive);
6300 	mutex_exit(&cmn->nvs_slock);
6301 }
6302 
6303 /*
6304  * nv_sgp_drive_active
6305  * Sets the flag used to indicate that the drive has been accessed and the
6306  * LED should be flicked off, then on.  It is cleared at a fixed time
6307  * interval by the LED taskq and set by the sata command start.
6308  */
6309 static void
6310 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6311 {
6312 	nv_sgp_cmn_t *cmn;
6313 
6314 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6315 		return;
6316 	cmn = nvc->nvc_sgp_cmn;
6317 
6318 	DTRACE_PROBE1(sgpio__active, int, drive);
6319 
6320 	mutex_enter(&cmn->nvs_slock);
6321 	cmn->nvs_connected |= (1 << drive);
6322 	cmn->nvs_activity |= (1 << drive);
6323 	mutex_exit(&cmn->nvs_slock);
6324 }
6325 
6326 
6327 /*
6328  * nv_sgp_locate
6329  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6330  * maintained in the SGPIO Control Block.
6331  */
static void
nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
{
	uint8_t leds;
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn;

	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	/* ignore out-of-range drive numbers */
	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
		return;

	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);

	mutex_enter(&cmn->nvs_slock);

	/* update only the Locate bits of this drive's LED field */
	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);

	leds &= ~TR_LOCATE_MASK;
	leds |= TR_LOCATE_SET(value);

	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);

	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;

	mutex_exit(&cmn->nvs_slock);

	/* push the updated Control Block out to the hardware */
	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
	}
}
6367 
6368 /*
6369  * nv_sgp_error
6370  * Turns the Error/Failure LED off or on for a particular drive.  State is
6371  * maintained in the SGPIO Control Block.
6372  */
static void
nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
{
	uint8_t leds;
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn;

	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	/* ignore out-of-range drive numbers */
	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
		return;

	DTRACE_PROBE2(sgpio__error, int, drive, int, value);

	mutex_enter(&cmn->nvs_slock);

	/* update only the Error bits of this drive's LED field */
	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);

	leds &= ~TR_ERROR_MASK;
	leds |= TR_ERROR_SET(value);

	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);

	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;

	mutex_exit(&cmn->nvs_slock);

	/* push the updated Control Block out to the hardware */
	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "!nv_sgp_write_data failure updating Fail/Error LED");
	}
}
6408 
6409 static void
6410 nv_sgp_cleanup(nv_ctl_t *nvc)
6411 {
6412 	int drive, i;
6413 	uint8_t drv_leds;
6414 	uint32_t led_state;
6415 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6416 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6417 	extern void psm_unmap_phys(caddr_t, size_t);
6418 
6419 	/*
6420 	 * If the SGPIO Control Block isn't mapped or the shared data
6421 	 * structure isn't present in this instance, there isn't much that
6422 	 * can be cleaned up.
6423 	 */
6424 	if ((cb == NULL) || (cmn == NULL))
6425 		return;
6426 
6427 	/* turn off activity LEDs for this controller */
6428 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6429 
6430 	/* get the existing LED state */
6431 	led_state = cb->sgpio0_tr;
6432 
6433 	/* turn off port 0 */
6434 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6435 	led_state &= SGPIO0_TR_DRV_CLR(drive);
6436 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6437 
6438 	/* turn off port 1 */
6439 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6440 	led_state &= SGPIO0_TR_DRV_CLR(drive);
6441 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6442 
6443 	/* set the new led state, which should turn off this ctrl's LEDs */
6444 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6445 	(void) nv_sgp_write_data(nvc);
6446 
6447 	/* clear the controller's in use bit */
6448 	mutex_enter(&cmn->nvs_slock);
6449 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6450 	mutex_exit(&cmn->nvs_slock);
6451 
6452 	if (cmn->nvs_in_use == 0) {
6453 		/* if all "in use" bits cleared, take everything down */
6454 
6455 		if (cmn->nvs_taskq != NULL) {
6456 			/* allow activity taskq to exit */
6457 			cmn->nvs_taskq_delay = 0;
6458 			cv_broadcast(&cmn->nvs_cv);
6459 
6460 			/* then destroy it */
6461 			ddi_taskq_destroy(cmn->nvs_taskq);
6462 		}
6463 
6464 		/* turn off all of the LEDs */
6465 		cb->sgpio0_tr = 0;
6466 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6467 		(void) nv_sgp_write_data(nvc);
6468 
6469 		cb->sgpio_sr = NULL;
6470 
6471 		/* zero out the CBP to cmn mapping */
6472 		for (i = 0; i < NV_MAX_CBPS; i++) {
6473 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
6474 				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
6475 				break;
6476 			}
6477 
6478 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6479 				break;
6480 		}
6481 
6482 		/* free resources */
6483 		cv_destroy(&cmn->nvs_cv);
6484 		mutex_destroy(&cmn->nvs_tlock);
6485 		mutex_destroy(&cmn->nvs_slock);
6486 
6487 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6488 	}
6489 
6490 	nvc->nvc_sgp_cmn = NULL;
6491 
6492 	/* unmap the SGPIO Control Block */
6493 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6494 }
6495 #endif	/* SGPIO_SUPPORT */
6496