1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *
29  * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
30  *
31  * NCQ
32  * ---
33  *
 * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
 * disabled and is likely to be revisited in the future.
36  *
37  *
38  * Power Management
39  * ----------------
40  *
41  * Normally power management would be responsible for ensuring the device
42  * is quiescent and then changing power states to the device, such as
43  * powering down parts or all of the device.  mcp55/ck804 is unique in
44  * that it is only available as part of a larger southbridge chipset, so
 * removing power to the device isn't possible.  Switches to control
 * power management states D0/D3 in the PCI configuration space appear to
 * be supported, but changes to these states are apparently ignored.  The
 * only further PM that the driver _could_ do is shut down the PHY, but in
 * order to deliver the first rev of the driver sooner rather than later,
 * that will be deferred until some future phase.
51  *
 * Since the driver currently will not directly change any power state to
 * the device, no power() entry point will be required.  However, it is
 * possible that in ACPI power state S3, aka suspend to RAM, power can be
 * removed from the device, and the driver cannot rely on BIOS to have
 * reset any state.  For the time being, there are no known non-default
 * configurations that need to be programmed.  This judgement is based on
 * the port of the legacy ata driver not having any such functionality and
 * on conversations with the PM team.  If such a restoration is later
 * deemed necessary, it can be incorporated into the DDI_RESUME processing.
62  *
63  */
64 
65 #include <sys/scsi/scsi.h>
66 #include <sys/pci.h>
67 #include <sys/byteorder.h>
68 #include <sys/sunddi.h>
69 #include <sys/sata/sata_hba.h>
70 #ifdef SGPIO_SUPPORT
71 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72 #include <sys/devctl.h>
73 #include <sys/sdt.h>
74 #endif
75 #include <sys/sata/adapters/nv_sata/nv_sata.h>
76 #include <sys/disp.h>
77 #include <sys/note.h>
78 #include <sys/promif.h>
79 
80 
81 /*
82  * Function prototypes for driver entry points
83  */
84 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
87     void *arg, void **result);
88 
89 /*
90  * Function prototypes for entry points from sata service module
91  * These functions are distinguished from other local functions
92  * by the prefix "nv_sata_"
93  */
94 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
95 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
96 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
97 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
98 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
99 
100 /*
101  * Local function prototypes
102  */
103 static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
104 static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
105 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
106 #ifdef NV_MSI_SUPPORTED
107 static int nv_add_msi_intrs(nv_ctl_t *nvc);
108 #endif
109 static void nv_rem_intrs(nv_ctl_t *nvc);
110 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
111 static int nv_start_nodata(nv_port_t *nvp, int slot);
112 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
113 static int nv_start_pio_in(nv_port_t *nvp, int slot);
114 static int nv_start_pio_out(nv_port_t *nvp, int slot);
115 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
116 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
117 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
118 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
119 static int nv_start_dma(nv_port_t *nvp, int slot);
120 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
121 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
122 static void nv_uninit_ctl(nv_ctl_t *nvc);
123 static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 static void nv_uninit_port(nv_port_t *nvp);
126 static int nv_init_port(nv_port_t *nvp);
127 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128 static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129 #ifdef NCQ
130 static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #endif
132 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134     int state);
135 static boolean_t nv_check_link(uint32_t sstatus);
136 static void nv_common_reg_init(nv_ctl_t *nvc);
137 static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
138 static void nv_reset(nv_port_t *nvp);
139 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
140 static void nv_timeout(void *);
141 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
142 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
143 static void nv_read_signature(nv_port_t *nvp);
144 static void mcp55_set_intr(nv_port_t *nvp, int flag);
145 static void mcp04_set_intr(nv_port_t *nvp, int flag);
146 static void nv_resume(nv_port_t *nvp);
147 static void nv_suspend(nv_port_t *nvp);
148 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
149 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
150 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151     sata_pkt_t *spkt);
152 static void nv_report_add_remove(nv_port_t *nvp, int flags);
153 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155     uchar_t failure_onbits2, uchar_t failure_offbits2,
156     uchar_t failure_onbits3, uchar_t failure_offbits3,
157     uint_t timeout_usec, int type_wait);
158 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159     uint_t timeout_usec, int type_wait);
160 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161 
162 #ifdef SGPIO_SUPPORT
163 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
164 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
165 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
166     cred_t *credp, int *rvalp);
167 
168 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
169 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
170     uint32_t *cbpp);
171 static int nv_sgp_init(nv_ctl_t *nvc);
172 static void nv_sgp_reset(nv_ctl_t *nvc);
173 static int nv_sgp_init_cmd(nv_ctl_t *nvc);
174 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
175 static int nv_sgp_csr_read(nv_ctl_t *nvc);
176 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
177 static int nv_sgp_write_data(nv_ctl_t *nvc);
178 static void nv_sgp_activity_led_ctl(void *arg);
179 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
180 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
181 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
182 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
183 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
184 static void nv_sgp_cleanup(nv_ctl_t *nvc);
185 #endif
186 
187 
188 /*
 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
 * Verify whether it is needed if this driver is ported to another ISA.
191  */
192 static ddi_dma_attr_t buffer_dma_attr = {
193 	DMA_ATTR_V0,		/* dma_attr_version */
194 	0,			/* dma_attr_addr_lo: lowest bus address */
195 	0xffffffffull,		/* dma_attr_addr_hi: */
196 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
197 	4,			/* dma_attr_align */
198 	1,			/* dma_attr_burstsizes. */
199 	1,			/* dma_attr_minxfer */
200 	0xffffffffull,		/* dma_attr_max xfer including all cookies */
201 	0xffffffffull,		/* dma_attr_seg */
202 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
203 	512,			/* dma_attr_granular */
204 	0,			/* dma_attr_flags */
205 };
206 
207 
208 /*
209  * DMA attributes for PRD tables
210  */
211 ddi_dma_attr_t nv_prd_dma_attr = {
212 	DMA_ATTR_V0,		/* dma_attr_version */
213 	0,			/* dma_attr_addr_lo */
214 	0xffffffffull,		/* dma_attr_addr_hi */
215 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
216 	4,			/* dma_attr_align */
217 	1,			/* dma_attr_burstsizes */
218 	1,			/* dma_attr_minxfer */
219 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
220 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
221 	1,			/* dma_attr_sgllen */
222 	1,			/* dma_attr_granular */
223 	0			/* dma_attr_flags */
224 };
225 
226 /*
227  * Device access attributes
228  */
229 static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
233 };
234 
235 
236 #ifdef SGPIO_SUPPORT
237 static struct cb_ops nv_cb_ops = {
238 	nv_open,		/* open */
239 	nv_close,		/* close */
240 	nodev,			/* strategy (block) */
241 	nodev,			/* print (block) */
242 	nodev,			/* dump (block) */
243 	nodev,			/* read */
244 	nodev,			/* write */
245 	nv_ioctl,		/* ioctl */
246 	nodev,			/* devmap */
247 	nodev,			/* mmap */
248 	nodev,			/* segmap */
249 	nochpoll,		/* chpoll */
250 	ddi_prop_op,		/* prop_op */
251 	NULL,			/* streams */
252 	D_NEW | D_MP |
253 	D_64BIT | D_HOTPLUG,	/* flags */
254 	CB_REV			/* rev */
255 };
256 #endif  /* SGPIO_SUPPORT */
257 
258 
259 static struct dev_ops nv_dev_ops = {
260 	DEVO_REV,		/* devo_rev */
261 	0,			/* refcnt  */
262 	nv_getinfo,		/* info */
263 	nulldev,		/* identify */
264 	nulldev,		/* probe */
265 	nv_attach,		/* attach */
266 	nv_detach,		/* detach */
267 	nodev,			/* no reset */
268 #ifdef SGPIO_SUPPORT
269 	&nv_cb_ops,		/* driver operations */
270 #else
271 	(struct cb_ops *)0,	/* driver operations */
272 #endif
273 	NULL,			/* bus operations */
274 	NULL			/* power */
275 };
276 
277 
278 /*
279  * Request Sense CDB for ATAPI
280  */
281 static const uint8_t nv_rqsense_cdb[16] = {
282 	SCMD_REQUEST_SENSE,
283 	0,
284 	0,
285 	0,
286 	SATA_ATAPI_MIN_RQSENSE_LEN,
287 	0,
288 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
289 };
290 
291 
292 static sata_tran_hotplug_ops_t nv_hotplug_ops;
293 
294 extern struct mod_ops mod_driverops;
295 
296 static  struct modldrv modldrv = {
297 	&mod_driverops,	/* driverops */
298 	"Nvidia ck804/mcp55 HBA",
299 	&nv_dev_ops,	/* driver ops */
300 };
301 
302 static  struct modlinkage modlinkage = {
303 	MODREV_1,
304 	&modldrv,
305 	NULL
306 };
307 
308 
309 /*
310  * wait between checks of reg status
311  */
312 int nv_usec_delay = NV_WAIT_REG_CHECK;
313 
314 /*
315  * The following is needed for nv_vcmn_err()
316  */
317 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
318 static char nv_log_buf[NV_STRING_512];
319 int nv_debug_flags = NVDBG_ALWAYS;
320 int nv_log_to_console = B_FALSE;
321 
322 int nv_log_delay = 0;
323 int nv_prom_print = B_FALSE;
324 
325 /*
326  * for debugging
327  */
328 #ifdef DEBUG
329 int ncq_commands = 0;
330 int non_ncq_commands = 0;
331 #endif
332 
333 /*
334  * Opaque state pointer to be initialized by ddi_soft_state_init()
335  */
336 static void *nv_statep	= NULL;
337 
338 
339 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
340 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
341 	nv_sata_activate,	/* activate port. cfgadm -c connect */
342 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
343 };
344 
345 
346 /*
347  *  nv module initialization
348  */
349 int
350 _init(void)
351 {
352 	int	error;
353 
354 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
355 
356 	if (error != 0) {
357 
358 		return (error);
359 	}
360 
361 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
362 
363 	if ((error = sata_hba_init(&modlinkage)) != 0) {
364 		ddi_soft_state_fini(&nv_statep);
365 		mutex_destroy(&nv_log_mutex);
366 
367 		return (error);
368 	}
369 
370 	error = mod_install(&modlinkage);
371 	if (error != 0) {
372 		sata_hba_fini(&modlinkage);
373 		ddi_soft_state_fini(&nv_statep);
374 		mutex_destroy(&nv_log_mutex);
375 
376 		return (error);
377 	}
378 
379 	return (error);
380 }
381 
382 
383 /*
384  * nv module uninitialize
385  */
386 int
387 _fini(void)
388 {
389 	int	error;
390 
391 	error = mod_remove(&modlinkage);
392 
393 	if (error != 0) {
394 		return (error);
395 	}
396 
397 	/*
398 	 * remove the resources allocated in _init()
399 	 */
400 	mutex_destroy(&nv_log_mutex);
401 	sata_hba_fini(&modlinkage);
402 	ddi_soft_state_fini(&nv_statep);
403 
404 	return (error);
405 }
406 
407 
408 /*
409  * nv _info entry point
410  */
411 int
412 _info(struct modinfo *modinfop)
413 {
414 	return (mod_info(&modlinkage, modinfop));
415 }
416 
417 
418 /*
 * these wrappers for ddi_{get,put}{8,16,32} are for observability
 * with dtrace
421  */
422 #ifdef DEBUG
423 
424 static void
425 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
426 {
427 	ddi_put8(handle, dev_addr, value);
428 }
429 
430 static void
431 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
432 {
433 	ddi_put32(handle, dev_addr, value);
434 }
435 
436 static uint32_t
437 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
438 {
439 	return (ddi_get32(handle, dev_addr));
440 }
441 
442 static void
443 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
444 {
445 	ddi_put16(handle, dev_addr, value);
446 }
447 
448 static uint16_t
449 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
450 {
451 	return (ddi_get16(handle, dev_addr));
452 }
453 
454 static uint8_t
455 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
456 {
457 	return (ddi_get8(handle, dev_addr));
458 }
459 
460 #else
461 
462 #define	nv_put8 ddi_put8
463 #define	nv_put32 ddi_put32
464 #define	nv_get32 ddi_get32
465 #define	nv_put16 ddi_put16
466 #define	nv_get16 ddi_get16
467 #define	nv_get8 ddi_get8
468 
469 #endif
470 
471 
472 /*
473  * Driver attach
474  */
475 static int
476 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
477 {
478 	int status, attach_state, intr_types, bar, i, command;
479 	int inst = ddi_get_instance(dip);
480 	ddi_acc_handle_t pci_conf_handle;
481 	nv_ctl_t *nvc;
482 	uint8_t subclass;
483 	uint32_t reg32;
484 #ifdef SGPIO_SUPPORT
485 	pci_regspec_t *regs;
486 	int rlen;
487 #endif
488 
489 	switch (cmd) {
490 
491 	case DDI_ATTACH:
492 
493 		NVLOG((NVDBG_INIT, NULL, NULL,
494 		    "nv_attach(): DDI_ATTACH inst %d", inst));
495 
496 		attach_state = ATTACH_PROGRESS_NONE;
497 
498 		status = ddi_soft_state_zalloc(nv_statep, inst);
499 
500 		if (status != DDI_SUCCESS) {
501 			break;
502 		}
503 
504 		nvc = ddi_get_soft_state(nv_statep, inst);
505 
506 		nvc->nvc_dip = dip;
507 
508 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
509 
510 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
511 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
512 			    PCI_CONF_REVID);
513 			NVLOG((NVDBG_INIT, NULL, NULL,
514 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
515 			    inst, nvc->nvc_revid, nv_debug_flags));
516 		} else {
517 			break;
518 		}
519 
520 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
521 
522 		/*
523 		 * If a device is attached after a suspend/resume, sometimes
524 		 * the command register is zero, as it might not be set by
525 		 * BIOS or a parent.  Set it again here.
526 		 */
527 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
528 
529 		if (command == 0) {
530 			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
531 			    " register", inst);
532 			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
533 			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
534 		}
535 
536 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
537 
538 		if (subclass & PCI_MASS_RAID) {
539 			cmn_err(CE_WARN,
540 			    "attach failed: RAID mode not supported");
541 			break;
542 		}
543 
544 		/*
545 		 * the 6 bars of the controller are:
546 		 * 0: port 0 task file
547 		 * 1: port 0 status
548 		 * 2: port 1 task file
549 		 * 3: port 1 status
550 		 * 4: bus master for both ports
551 		 * 5: extended registers for SATA features
552 		 */
553 		for (bar = 0; bar < 6; bar++) {
554 			status = ddi_regs_map_setup(dip, bar + 1,
555 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
556 			    &nvc->nvc_bar_hdl[bar]);
557 
558 			if (status != DDI_SUCCESS) {
559 				NVLOG((NVDBG_INIT, nvc, NULL,
560 				    "ddi_regs_map_setup failure for bar"
561 				    " %d status = %d", bar, status));
562 				break;
563 			}
564 		}
565 
		attach_state |= ATTACH_PROGRESS_BARS;

		/*
		 * bail out if any BAR failed to map; the failure path
		 * below unmaps the BARs that did succeed
		 */
		if (status != DDI_SUCCESS) {
			break;
		}
567 
568 		/*
569 		 * initialize controller and driver core
570 		 */
571 		status = nv_init_ctl(nvc, pci_conf_handle);
572 
573 		if (status == NV_FAILURE) {
574 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
575 
576 			break;
577 		}
578 
579 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
580 
581 		/*
582 		 * initialize mutexes
583 		 */
584 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
585 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
586 
587 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
588 
589 		/*
590 		 * get supported interrupt types
591 		 */
592 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
593 		    DDI_SUCCESS) {
594 			nv_cmn_err(CE_WARN, nvc, NULL,
595 			    "!ddi_intr_get_supported_types failed");
596 			NVLOG((NVDBG_INIT, nvc, NULL,
597 			    "interrupt supported types failed"));
598 
599 			break;
600 		}
601 
602 		NVLOG((NVDBG_INIT, nvc, NULL,
603 		    "ddi_intr_get_supported_types() returned: 0x%x",
604 		    intr_types));
605 
606 #ifdef NV_MSI_SUPPORTED
607 		if (intr_types & DDI_INTR_TYPE_MSI) {
608 			NVLOG((NVDBG_INIT, nvc, NULL,
609 			    "using MSI interrupt type"));
610 
611 			/*
612 			 * Try MSI first, but fall back to legacy if MSI
613 			 * attach fails
614 			 */
615 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
616 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
617 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
618 				NVLOG((NVDBG_INIT, nvc, NULL,
619 				    "MSI interrupt setup done"));
620 			} else {
621 				nv_cmn_err(CE_CONT, nvc, NULL,
622 				    "!MSI registration failed "
623 				    "will try Legacy interrupts");
624 			}
625 		}
626 #endif
627 
628 		/*
629 		 * Either the MSI interrupt setup has failed or only
630 		 * the fixed interrupts are available on the system.
631 		 */
632 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
633 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
634 
635 			NVLOG((NVDBG_INIT, nvc, NULL,
636 			    "using Legacy interrupt type"));
637 
638 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
639 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
640 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
641 				NVLOG((NVDBG_INIT, nvc, NULL,
642 				    "Legacy interrupt setup done"));
643 			} else {
644 				nv_cmn_err(CE_WARN, nvc, NULL,
645 				    "!legacy interrupt setup failed");
646 				NVLOG((NVDBG_INIT, nvc, NULL,
647 				    "legacy interrupt setup failed"));
648 				break;
649 			}
650 		}
651 
652 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
653 			NVLOG((NVDBG_INIT, nvc, NULL,
654 			    "no interrupts registered"));
655 			break;
656 		}
657 
658 #ifdef SGPIO_SUPPORT
659 		/*
660 		 * save off the controller number
661 		 */
662 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
663 		    "reg", (caddr_t)&regs, &rlen);
664 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
665 		kmem_free(regs, rlen);
666 
667 		/*
668 		 * initialize SGPIO
669 		 */
670 		nv_sgp_led_init(nvc, pci_conf_handle);
671 #endif	/* SGPIO_SUPPORT */
672 
673 		/*
674 		 * attach to sata module
675 		 */
676 		if (sata_hba_attach(nvc->nvc_dip,
677 		    &nvc->nvc_sata_hba_tran,
678 		    DDI_ATTACH) != DDI_SUCCESS) {
679 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
680 
681 			break;
682 		}
683 
684 		pci_config_teardown(&pci_conf_handle);
685 
686 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
687 
688 		return (DDI_SUCCESS);
689 
690 	case DDI_RESUME:
691 
692 		nvc = ddi_get_soft_state(nv_statep, inst);
693 
694 		NVLOG((NVDBG_INIT, nvc, NULL,
695 		    "nv_attach(): DDI_RESUME inst %d", inst));
696 
697 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
698 			return (DDI_FAILURE);
699 		}
700 
701 		/*
702 		 * If a device is attached after a suspend/resume, sometimes
703 		 * the command register is zero, as it might not be set by
704 		 * BIOS or a parent.  Set it again here.
705 		 */
706 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
707 
708 		if (command == 0) {
709 			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
710 			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
711 		}
712 
713 		/*
714 		 * Need to set bit 2 to 1 at config offset 0x50
715 		 * to enable access to the bar5 registers.
716 		 */
717 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
718 
719 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
720 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
721 			    reg32 | NV_BAR5_SPACE_EN);
722 		}
723 
724 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
725 
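		/*
		 * resume each port now that controller access has been
		 * restored
		 */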
726 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
727 			nv_resume(&(nvc->nvc_port[i]));
728 		}
729 
730 		pci_config_teardown(&pci_conf_handle);
731 
732 		return (DDI_SUCCESS);
733 
734 	default:
735 		return (DDI_FAILURE);
736 	}
737 
738 
739 	/*
740 	 * DDI_ATTACH failure path starts here
741 	 */
742 
743 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
744 		nv_rem_intrs(nvc);
745 	}
746 
747 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
748 		/*
749 		 * Remove timers
750 		 */
751 		int port = 0;
752 		nv_port_t *nvp;
753 
754 		for (; port < NV_MAX_PORTS(nvc); port++) {
755 			nvp = &(nvc->nvc_port[port]);
756 			if (nvp->nvp_timeout_id != 0) {
757 				(void) untimeout(nvp->nvp_timeout_id);
758 			}
759 		}
760 	}
761 
762 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
763 		mutex_destroy(&nvc->nvc_mutex);
764 	}
765 
766 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
767 		nv_uninit_ctl(nvc);
768 	}
769 
770 	if (attach_state & ATTACH_PROGRESS_BARS) {
771 		while (--bar >= 0) {
772 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
773 		}
774 	}
775 
776 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
777 		ddi_soft_state_free(nv_statep, inst);
778 	}
779 
780 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
781 		pci_config_teardown(&pci_conf_handle);
782 	}
783 
784 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
785 
786 	return (DDI_FAILURE);
787 }
788 
789 
790 static int
791 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
792 {
793 	int i, port, inst = ddi_get_instance(dip);
794 	nv_ctl_t *nvc;
795 	nv_port_t *nvp;
796 
797 	nvc = ddi_get_soft_state(nv_statep, inst);
798 
799 	switch (cmd) {
800 
801 	case DDI_DETACH:
802 
803 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
804 
805 		/*
806 		 * Remove interrupts
807 		 */
808 		nv_rem_intrs(nvc);
809 
810 		/*
811 		 * Remove timers
812 		 */
813 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
814 			nvp = &(nvc->nvc_port[port]);
815 			if (nvp->nvp_timeout_id != 0) {
816 				(void) untimeout(nvp->nvp_timeout_id);
817 			}
818 		}
819 
820 		/*
821 		 * Remove maps
822 		 */
823 		for (i = 0; i < 6; i++) {
824 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
825 		}
826 
827 		/*
828 		 * Destroy mutexes
829 		 */
830 		mutex_destroy(&nvc->nvc_mutex);
831 
832 		/*
833 		 * Uninitialize the controller
834 		 */
835 		nv_uninit_ctl(nvc);
836 
837 #ifdef SGPIO_SUPPORT
838 		/*
839 		 * release SGPIO resources
840 		 */
841 		nv_sgp_cleanup(nvc);
842 #endif
843 
844 		/*
845 		 * unregister from the sata module
846 		 */
847 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
848 
849 		/*
850 		 * Free soft state
851 		 */
852 		ddi_soft_state_free(nv_statep, inst);
853 
854 		return (DDI_SUCCESS);
855 
856 	case DDI_SUSPEND:
857 		/*
858 		 * The PM functions for suspend and resume are incomplete
859 		 * and need additional work.  It may or may not work in
860 		 * the current state.
861 		 */
862 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
863 
864 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
865 			nv_suspend(&(nvc->nvc_port[i]));
866 		}
867 
868 		nvc->nvc_state |= NV_CTRL_SUSPEND;
869 
870 		return (DDI_SUCCESS);
871 
872 	default:
873 		return (DDI_FAILURE);
874 	}
875 }
876 
877 
878 /*ARGSUSED*/
879 static int
880 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
881 {
882 	nv_ctl_t *nvc;
883 	int instance;
884 	dev_t dev;
885 
886 	dev = (dev_t)arg;
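	/* the minor number maps directly to the controller instance */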
887 	instance = getminor(dev);
888 
889 	switch (infocmd) {
890 	case DDI_INFO_DEVT2DEVINFO:
891 		nvc = ddi_get_soft_state(nv_statep,  instance);
892 		if (nvc != NULL) {
893 			*result = nvc->nvc_dip;
894 			return (DDI_SUCCESS);
895 		} else {
896 			*result = NULL;
897 			return (DDI_FAILURE);
898 		}
899 	case DDI_INFO_DEVT2INSTANCE:
900 		*(int *)result = instance;
901 		break;
902 	default:
903 		break;
904 	}
905 	return (DDI_SUCCESS);
906 }
907 
908 
909 #ifdef SGPIO_SUPPORT
910 /* ARGSUSED */
911 static int
912 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
913 {
914 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
915 
916 	if (nvc == NULL) {
917 		return (ENXIO);
918 	}
919 
920 	return (0);
921 }
922 
923 
924 /* ARGSUSED */
925 static int
926 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
927 {
928 	return (0);
929 }
930 
931 
932 /* ARGSUSED */
933 static int
934 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
935 {
936 	nv_ctl_t *nvc;
937 	int inst;
938 	int status;
939 	int ctlr, port;
940 	int drive;
941 	uint8_t curr_led;
942 	struct dc_led_ctl led;
943 
944 	inst = getminor(dev);
945 	if (inst == -1) {
946 		return (EBADF);
947 	}
948 
949 	nvc = ddi_get_soft_state(nv_statep, inst);
950 	if (nvc == NULL) {
951 		return (EBADF);
952 	}
953 
954 	switch (cmd) {
955 	case DEVCTL_SET_LED:
956 		status = ddi_copyin((void *)arg, &led,
957 		    sizeof (struct dc_led_ctl), mode);
958 		if (status != 0)
959 			return (EFAULT);
960 
961 		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code does as well.
		 * Note that this also validates the port value encoded in
		 * led_number.
966 		 */
967 
968 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
969 		if ((ctlr != 0) && (ctlr != 1))
970 			return (ENXIO);
971 
972 		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
973 		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
974 			return (EINVAL);
975 		}
976 
977 		drive = led.led_number;
978 
979 		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
980 		    (led.led_state == DCL_STATE_OFF)) {
981 
982 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
983 				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
984 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
985 				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
986 			} else {
987 				return (ENXIO);
988 			}
989 
990 			port = SGP_DRV_TO_PORT(led.led_number);
991 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
992 		}
993 
994 		if (led.led_ctl_active == DCL_CNTRL_ON) {
995 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
996 				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
997 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
998 				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
999 			} else {
1000 				return (ENXIO);
1001 			}
1002 
1003 			port = SGP_DRV_TO_PORT(led.led_number);
1004 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1005 		}
1006 
1007 		break;
1008 
1009 	case DEVCTL_GET_LED:
1010 		status = ddi_copyin((void *)arg, &led,
1011 		    sizeof (struct dc_led_ctl), mode);
1012 		if (status != 0)
1013 			return (EFAULT);
1014 
1015 		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code does as well.
		 * Note that this also validates the port value encoded in
		 * led_number.
1020 		 */
1021 
1022 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1023 		if ((ctlr != 0) && (ctlr != 1))
1024 			return (ENXIO);
1025 
1026 		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1027 		    led.led_number);
1028 
1029 		port = SGP_DRV_TO_PORT(led.led_number);
1030 		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1031 			led.led_ctl_active = DCL_CNTRL_ON;
1032 
1033 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1034 				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1035 					led.led_state = DCL_STATE_OFF;
1036 				else
1037 					led.led_state = DCL_STATE_ON;
1038 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1039 				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1040 					led.led_state = DCL_STATE_OFF;
1041 				else
1042 					led.led_state = DCL_STATE_ON;
1043 			} else {
1044 				return (ENXIO);
1045 			}
1046 		} else {
1047 			led.led_ctl_active = DCL_CNTRL_OFF;
1048 			/*
1049 			 * Not really off, but never set and no constant for
1050 			 * tri-state
1051 			 */
1052 			led.led_state = DCL_STATE_OFF;
1053 		}
1054 
1055 		status = ddi_copyout(&led, (void *)arg,
1056 		    sizeof (struct dc_led_ctl), mode);
1057 		if (status != 0)
1058 			return (EFAULT);
1059 
1060 		break;
1061 
1062 	case DEVCTL_NUM_LEDS:
1063 		led.led_number = SGPIO_DRV_CNT_VALUE;
1064 		led.led_ctl_active = 1;
1065 		led.led_type = 3;
1066 
1067 		/*
1068 		 * According to documentation, NVIDIA SGPIO is supposed to
1069 		 * support blinking, but it does not seem to work in practice.
1070 		 */
1071 		led.led_state = DCL_STATE_ON;
1072 
1073 		status = ddi_copyout(&led, (void *)arg,
1074 		    sizeof (struct dc_led_ctl), mode);
1075 		if (status != 0)
1076 			return (EFAULT);
1077 
1078 		break;
1079 
1080 	default:
1081 		return (EINVAL);
1082 	}
1083 
1084 	return (0);
1085 }
1086 #endif	/* SGPIO_SUPPORT */
1087 
1088 
1089 /*
1090  * Called by sata module to probe a port.  Port and device state
1091  * are not changed here... only reported back to the sata module.
1092  *
1093  * If probe confirms a device is present for the first time, it will
1094  * initiate a device reset, then probe will be called again and the
 * signature will be checked.  If the signature is valid, data structures
1096  * will be initialized.
1097  */
1098 static int
1099 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1100 {
1101 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1102 	uint8_t cport = sd->satadev_addr.cport;
1103 	uint8_t pmport = sd->satadev_addr.pmport;
1104 	uint8_t qual = sd->satadev_addr.qual;
1105 	clock_t nv_lbolt = ddi_get_lbolt();
1106 	nv_port_t *nvp;
1107 
1108 	if (cport >= NV_MAX_PORTS(nvc)) {
1109 		sd->satadev_type = SATA_DTYPE_NONE;
1110 		sd->satadev_state = SATA_STATE_UNKNOWN;
1111 
1112 		return (SATA_FAILURE);
1113 	}
1114 
1115 	ASSERT(nvc->nvc_port != NULL);
1116 	nvp = &(nvc->nvc_port[cport]);
1117 	ASSERT(nvp != NULL);
1118 
1119 	NVLOG((NVDBG_PROBE, nvc, nvp,
1120 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1121 	    "qual: 0x%x", cport, pmport, qual));
1122 
1123 	mutex_enter(&nvp->nvp_mutex);
1124 
1125 	/*
1126 	 * This check seems to be done in the SATA module.
1127 	 * It may not be required here
1128 	 */
1129 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1130 		nv_cmn_err(CE_WARN, nvc, nvp,
1131 		    "port inactive.  Use cfgadm to activate");
1132 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1133 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1134 		mutex_exit(&nvp->nvp_mutex);
1135 
1136 		return (SATA_FAILURE);
1137 	}
1138 
1139 	if (qual == SATA_ADDR_PMPORT) {
1140 		sd->satadev_type = SATA_DTYPE_NONE;
1141 		sd->satadev_state = SATA_STATE_UNKNOWN;
1142 		mutex_exit(&nvp->nvp_mutex);
1143 		nv_cmn_err(CE_WARN, nvc, nvp,
1144 		    "controller does not support port multiplier");
1145 
1146 		return (SATA_FAILURE);
1147 	}
1148 
1149 	sd->satadev_state = SATA_PSTATE_PWRON;
1150 
1151 	nv_copy_registers(nvp, sd, NULL);
1152 
1153 	/*
1154 	 * determine link status
1155 	 */
1156 	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
1157 		uint8_t det;
1158 
1159 		/*
		 * Reset will cause the link to go down for a short period of
		 * time.  If the link is lost for less than 2 seconds, ignore
		 * it so that the reset can progress.
1163 		 */
1164 		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
1165 
1166 			if (nvp->nvp_link_lost_time == 0) {
1167 				nvp->nvp_link_lost_time = nv_lbolt;
1168 			}
1169 
1170 			if (TICK_TO_SEC(nv_lbolt -
1171 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
1172 				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
1173 				    "probe: intermittent link lost while"
1174 				    " resetting"));
1175 				/*
1176 				 * fake status of link so that probe continues
1177 				 */
1178 				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1179 				    SSTATUS_IPM_ACTIVE);
1180 				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1181 				    SSTATUS_DET_DEVPRE_PHYCOM);
1182 				sd->satadev_type = SATA_DTYPE_UNKNOWN;
1183 				mutex_exit(&nvp->nvp_mutex);
1184 
1185 				return (SATA_SUCCESS);
1186 			} else {
1187 				nvp->nvp_state &=
1188 				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1189 			}
1190 		}
1191 
1192 		/*
1193 		 * no link, so tear down port and abort all active packets
1194 		 */
1195 
1196 		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
1197 		    SSTATUS_DET_SHIFT;
1198 
1199 		switch (det) {
1200 		case SSTATUS_DET_NODEV:
1201 		case SSTATUS_DET_PHYOFFLINE:
1202 			sd->satadev_type = SATA_DTYPE_NONE;
1203 			break;
1204 		default:
1205 			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1206 			break;
1207 		}
1208 
1209 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1210 		    "probe: link lost invoking nv_abort_active"));
1211 
1212 		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
1213 		nv_uninit_port(nvp);
1214 
1215 		mutex_exit(&nvp->nvp_mutex);
1216 
1217 		return (SATA_SUCCESS);
1218 	} else {
1219 		nvp->nvp_link_lost_time = 0;
1220 	}
1221 
1222 	/*
1223 	 * A device is present so clear hotremoved flag
1224 	 */
1225 	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
1226 
1227 #ifdef SGPIO_SUPPORT
1228 	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1229 	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1230 #endif
1231 
1232 	/*
1233 	 * If the signature was acquired previously there is no need to
1234 	 * do it again.
1235 	 */
1236 	if (nvp->nvp_signature != 0) {
1237 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1238 		    "probe: signature acquired previously"));
1239 		sd->satadev_type = nvp->nvp_type;
1240 		mutex_exit(&nvp->nvp_mutex);
1241 
1242 		return (SATA_SUCCESS);
1243 	}
1244 
1245 	/*
1246 	 * If NV_PORT_RESET is not set, this is the first time through
1247 	 * so perform reset and return.
1248 	 */
1249 	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
1250 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1251 		    "probe: first reset to get sig"));
1252 		nvp->nvp_state |= NV_PORT_RESET_PROBE;
1253 		nv_reset(nvp);
1254 		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1255 		nvp->nvp_probe_time = nv_lbolt;
1256 		mutex_exit(&nvp->nvp_mutex);
1257 
1258 		return (SATA_SUCCESS);
1259 	}
1260 
1261 	/*
1262 	 * Reset was done previously.  see if the signature is
1263 	 * available.
1264 	 */
1265 	nv_read_signature(nvp);
1266 	sd->satadev_type = nvp->nvp_type;
1267 
1268 	/*
1269 	 * Some drives may require additional resets to get a
1270 	 * valid signature.  If a drive was not just powered up, the signature
 * should arrive within half a second of reset.  Therefore, if more
 * than 5 seconds have elapsed while waiting for a signature, reset
1273 	 * again.  These extra resets do not appear to create problems when
1274 	 * the drive is spinning up for more than this reset period.
1275 	 */
1276 	if (nvp->nvp_signature == 0) {
1277 		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1278 			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1279 			    " during signature acquisition"));
1280 			nv_reset(nvp);
1281 		}
1282 
1283 		mutex_exit(&nvp->nvp_mutex);
1284 
1285 		return (SATA_SUCCESS);
1286 	}
1287 
1288 	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1289 	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1290 
1291 	/*
1292 	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1293 	 * it is not either of those, then just return.
1294 	 */
1295 	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1296 	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1297 		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1298 		    " disks/CDs/DVDs.  Signature acquired was %X",
1299 		    nvp->nvp_signature));
1300 		mutex_exit(&nvp->nvp_mutex);
1301 
1302 		return (SATA_SUCCESS);
1303 	}
1304 
1305 	/*
1306 	 * make sure structures are initialized
1307 	 */
1308 	if (nv_init_port(nvp) == NV_SUCCESS) {
1309 		NVLOG((NVDBG_PROBE, nvc, nvp,
1310 		    "device detected and set up at port %d", cport));
1311 		mutex_exit(&nvp->nvp_mutex);
1312 
1313 		return (SATA_SUCCESS);
1314 	} else {
1315 		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1316 		    "structures for port %d", cport);
1317 		mutex_exit(&nvp->nvp_mutex);
1318 
1319 		return (SATA_FAILURE);
1320 	}
1321 	/*NOTREACHED*/
1322 }
1323 
1324 
1325 /*
1326  * Called by sata module to start a new command.
1327  */
1328 static int
1329 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1330 {
1331 	int cport = spkt->satapkt_device.satadev_addr.cport;
1332 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1333 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1334 	int ret;
1335 
1336 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1337 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1338 
1339 	mutex_enter(&nvp->nvp_mutex);
1340 
1341 	/*
1342 	 * hotremoved is an intermediate state where the link was lost,
1343 	 * but the hotplug event has not yet been processed by the sata
1344 	 * module.  Fail the request.
1345 	 */
1346 	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1347 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1348 		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1349 		NVLOG((NVDBG_ERRS, nvc, nvp,
1350 		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1351 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1352 		mutex_exit(&nvp->nvp_mutex);
1353 
1354 		return (SATA_TRAN_PORT_ERROR);
1355 	}
1356 
1357 	if (nvp->nvp_state & NV_PORT_RESET) {
1358 		NVLOG((NVDBG_ERRS, nvc, nvp,
1359 		    "still waiting for reset completion"));
1360 		spkt->satapkt_reason = SATA_PKT_BUSY;
1361 		mutex_exit(&nvp->nvp_mutex);
1362 
1363 		/*
1364 		 * If in panic, timeouts do not occur, so fake one
1365 		 * so that the signature can be acquired to complete
1366 		 * the reset handling.
1367 		 */
1368 		if (ddi_in_panic()) {
1369 			nv_timeout(nvp);
1370 		}
1371 
1372 		return (SATA_TRAN_BUSY);
1373 	}
1374 
1375 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1376 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1377 		NVLOG((NVDBG_ERRS, nvc, nvp,
1378 		    "nv_sata_start: SATA_DTYPE_NONE"));
1379 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1380 		mutex_exit(&nvp->nvp_mutex);
1381 
1382 		return (SATA_TRAN_PORT_ERROR);
1383 	}
1384 
1385 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1386 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1387 		nv_cmn_err(CE_WARN, nvc, nvp,
1388 		    "port multipliers not supported by controller");
1389 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1390 		mutex_exit(&nvp->nvp_mutex);
1391 
1392 		return (SATA_TRAN_CMD_UNSUPPORTED);
1393 	}
1394 
1395 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1396 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1397 		NVLOG((NVDBG_ERRS, nvc, nvp,
1398 		    "nv_sata_start: port not yet initialized"));
1399 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1400 		mutex_exit(&nvp->nvp_mutex);
1401 
1402 		return (SATA_TRAN_PORT_ERROR);
1403 	}
1404 
1405 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1406 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1407 		NVLOG((NVDBG_ERRS, nvc, nvp,
1408 		    "nv_sata_start: NV_PORT_INACTIVE"));
1409 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1410 		mutex_exit(&nvp->nvp_mutex);
1411 
1412 		return (SATA_TRAN_PORT_ERROR);
1413 	}
1414 
1415 	if (nvp->nvp_state & NV_PORT_FAILED) {
1416 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1417 		NVLOG((NVDBG_ERRS, nvc, nvp,
1418 		    "nv_sata_start: NV_PORT_FAILED state"));
1419 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1420 		mutex_exit(&nvp->nvp_mutex);
1421 
1422 		return (SATA_TRAN_PORT_ERROR);
1423 	}
1424 
1425 	/*
	 * After a device reset, once sata module restore processing is
	 * complete, the sata module will set sata_clear_dev_reset, which
	 * indicates that restore processing has completed and normal
	 * non-restore related commands should be processed.
1430 	 */
1431 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1432 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1433 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1434 		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1435 	}
1436 
1437 	/*
1438 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1439 	 * only allow commands which restore device state.  The sata module
	 * marks such commands with sata_ignore_dev_reset.
	 *
	 * during coredump, nv_reset is called but then the restore
	 * doesn't happen.  For now, work around this by ignoring the wait
	 * for restore if the system is panicking.
1445 	 */
1446 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1447 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1448 	    (ddi_in_panic() == 0)) {
1449 		spkt->satapkt_reason = SATA_PKT_BUSY;
1450 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1451 		    "nv_sata_start: waiting for restore "));
1452 		mutex_exit(&nvp->nvp_mutex);
1453 
1454 		return (SATA_TRAN_BUSY);
1455 	}
1456 
1457 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1458 		spkt->satapkt_reason = SATA_PKT_BUSY;
1459 		NVLOG((NVDBG_ERRS, nvc, nvp,
1460 		    "nv_sata_start: NV_PORT_ABORTING"));
1461 		mutex_exit(&nvp->nvp_mutex);
1462 
1463 		return (SATA_TRAN_BUSY);
1464 	}
1465 
1466 	if (spkt->satapkt_op_mode &
1467 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1468 
1469 		ret = nv_start_sync(nvp, spkt);
1470 
1471 		mutex_exit(&nvp->nvp_mutex);
1472 
1473 		return (ret);
1474 	}
1475 
1476 	/*
	 * start the command asynchronously
1478 	 */
1479 	ret = nv_start_async(nvp, spkt);
1480 
1481 	mutex_exit(&nvp->nvp_mutex);
1482 
1483 	return (ret);
1484 }
1485 
1486 
1487 /*
1488  * SATA_OPMODE_POLLING implies the driver is in a
1489  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1490  * If only SATA_OPMODE_SYNCH is set, the driver can use
1491  * interrupts and sleep wait on a cv.
1492  *
1493  * If SATA_OPMODE_POLLING is set, the driver can't use
1494  * interrupts and must busy wait and simulate the
1495  * interrupts by waiting for BSY to be cleared.
1496  *
1497  * Synchronous mode has to return BUSY if there are
1498  * any other commands already on the drive.
1499  */
1500 static int
1501 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1502 {
1503 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1504 	int ret;
1505 
1506 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1507 
1508 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1509 		spkt->satapkt_reason = SATA_PKT_BUSY;
		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1513 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1514 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1515 
1516 		return (SATA_TRAN_BUSY);
1517 	}
1518 
1519 	/*
1520 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1521 	 */
1522 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1523 	    servicing_interrupt()) {
1524 		spkt->satapkt_reason = SATA_PKT_BUSY;
1525 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1526 		    "SYNC mode not allowed during interrupt"));
1527 
1528 		return (SATA_TRAN_BUSY);
1529 
1530 	}
1531 
1532 	/*
1533 	 * disable interrupt generation if in polled mode
1534 	 */
1535 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1536 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1537 	}
1538 
1539 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1540 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1541 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1542 		}
1543 
1544 		return (ret);
1545 	}
1546 
1547 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1548 		mutex_exit(&nvp->nvp_mutex);
1549 		ret = nv_poll_wait(nvp, spkt);
1550 		mutex_enter(&nvp->nvp_mutex);
1551 
1552 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1553 
		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
		    " done, ret: %d", ret));
1556 
1557 		return (ret);
1558 	}
1559 
1560 	/*
1561 	 * non-polling synchronous mode handling.  The interrupt will signal
1562 	 * when the IO is completed.
1563 	 */
1564 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1565 
1566 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1567 
1568 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1569 	}
1570 
	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
	    " done, reason %d", spkt->satapkt_reason));
1573 
1574 	return (SATA_TRAN_ACCEPTED);
1575 }
1576 
1577 
1578 static int
1579 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1580 {
1581 	int ret;
1582 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1583 #if ! defined(__lock_lint)
1584 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1585 #endif
1586 
1587 	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1588 
1589 	for (;;) {
1590 
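		/*
		 * give the device roughly 400ns to settle before sampling
		 * status (the customary ATA delay after issuing a command)
		 */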
1591 		NV_DELAY_NSEC(400);
1592 
1593 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1594 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1595 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1596 			mutex_enter(&nvp->nvp_mutex);
1597 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1598 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1599 			nv_reset(nvp);
1600 			nv_complete_io(nvp, spkt, 0);
1601 			mutex_exit(&nvp->nvp_mutex);
1602 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1603 			    "SATA_STATUS_BSY"));
1604 
1605 			return (SATA_TRAN_ACCEPTED);
1606 		}
1607 
1608 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1609 
1610 		/*
1611 		 * Simulate interrupt.
1612 		 */
1613 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1614 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1615 
1616 		if (ret != DDI_INTR_CLAIMED) {
1617 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1618 			    " unclaimed -- resetting"));
1619 			mutex_enter(&nvp->nvp_mutex);
1620 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1621 			nv_reset(nvp);
1622 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1623 			nv_complete_io(nvp, spkt, 0);
1624 			mutex_exit(&nvp->nvp_mutex);
1625 
1626 			return (SATA_TRAN_ACCEPTED);
1627 		}
1628 
1629 #if ! defined(__lock_lint)
1630 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1631 			/*
1632 			 * packet is complete
1633 			 */
1634 			return (SATA_TRAN_ACCEPTED);
1635 		}
1636 #endif
1637 	}
1638 	/*NOTREACHED*/
1639 }
1640 
1641 
1642 /*
1643  * Called by sata module to abort outstanding packets.
1644  */
1645 /*ARGSUSED*/
1646 static int
1647 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1648 {
1649 	int cport = spkt->satapkt_device.satadev_addr.cport;
1650 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1651 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1652 	int c_a, ret;
1653 
1654 	ASSERT(cport < NV_MAX_PORTS(nvc));
1655 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1656 
1657 	mutex_enter(&nvp->nvp_mutex);
1658 
1659 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1660 		mutex_exit(&nvp->nvp_mutex);
1661 		nv_cmn_err(CE_WARN, nvc, nvp,
1662 		    "abort request failed: port inactive");
1663 
1664 		return (SATA_FAILURE);
1665 	}
1666 
1667 	/*
	 * if spkt == NULL, abort all commands
1669 	 */
1670 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1671 
1672 	if (c_a) {
1673 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1674 		    "packets aborted running=%d", c_a));
1675 		ret = SATA_SUCCESS;
1676 	} else {
1677 		if (spkt == NULL) {
1678 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1679 		} else {
1680 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1681 			    "can't find spkt to abort"));
1682 		}
1683 		ret = SATA_FAILURE;
1684 	}
1685 
1686 	mutex_exit(&nvp->nvp_mutex);
1687 
1688 	return (ret);
1689 }
1690 
1691 
1692 /*
1693  * if spkt == NULL abort all pkts running, otherwise
1694  * abort the requested packet.  must be called with nv_mutex
1695  * held and returns with it held.  Not NCQ aware.
1696  */
1697 static int
1698 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1699 {
1700 	int aborted = 0, i, reset_once = B_FALSE;
1701 	struct nv_slot *nv_slotp;
1702 	sata_pkt_t *spkt_slot;
1703 
1704 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1705 
1706 	/*
1707 	 * return if the port is not configured
1708 	 */
1709 	if (nvp->nvp_slot == NULL) {
1710 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1711 		    "nv_abort_active: not configured so returning"));
1712 
1713 		return (0);
1714 	}
1715 
1716 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1717 
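	/*
	 * flag the port as aborting; nv_sata_start() rejects new commands
	 * with SATA_TRAN_BUSY while this flag is set
	 */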
1718 	nvp->nvp_state |= NV_PORT_ABORTING;
1719 
1720 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1721 
1722 		nv_slotp = &(nvp->nvp_slot[i]);
1723 		spkt_slot = nv_slotp->nvslot_spkt;
1724 
1725 		/*
1726 		 * skip if not active command in slot
1727 		 */
1728 		if (spkt_slot == NULL) {
1729 			continue;
1730 		}
1731 
1732 		/*
1733 		 * if a specific packet was requested, skip if
1734 		 * this is not a match
1735 		 */
1736 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1737 			continue;
1738 		}
1739 
1740 		/*
1741 		 * stop the hardware.  This could need reworking
1742 		 * when NCQ is enabled in the driver.
1743 		 */
1744 		if (reset_once == B_FALSE) {
1745 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1746 
1747 			/*
1748 			 * stop DMA engine
1749 			 */
1750 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1751 
1752 			nv_reset(nvp);
1753 			reset_once = B_TRUE;
1754 		}
1755 
1756 		spkt_slot->satapkt_reason = abort_reason;
1757 		nv_complete_io(nvp, spkt_slot, i);
1758 		aborted++;
1759 	}
1760 
1761 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1762 
1763 	return (aborted);
1764 }
1765 
1766 
1767 /*
1768  * Called by sata module to reset a port, device, or the controller.
1769  */
1770 static int
1771 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1772 {
1773 	int cport = sd->satadev_addr.cport;
1774 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1775 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1776 	int ret = SATA_SUCCESS;
1777 
1778 	ASSERT(cport < NV_MAX_PORTS(nvc));
1779 
1780 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1781 
1782 	mutex_enter(&nvp->nvp_mutex);
1783 
1784 	switch (sd->satadev_addr.qual) {
1785 
1786 	case SATA_ADDR_CPORT:
1787 		/*FALLTHROUGH*/
1788 	case SATA_ADDR_DCPORT:
1789 		nv_reset(nvp);
1790 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1791 
1792 		break;
1793 	case SATA_ADDR_CNTRL:
		NVLOG((NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: controller reset not supported"));
1796 
1797 		break;
1798 	case SATA_ADDR_PMPORT:
1799 	case SATA_ADDR_DPMPORT:
1800 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1801 		    "nv_sata_reset: port multipliers not supported"));
1802 		/*FALLTHROUGH*/
1803 	default:
1804 		/*
1805 		 * unsupported case
1806 		 */
1807 		ret = SATA_FAILURE;
1808 		break;
1809 	}
1810 
1811 	if (ret == SATA_SUCCESS) {
1812 		/*
1813 		 * If the port is inactive, do a quiet reset and don't attempt
1814 		 * to wait for reset completion or do any post reset processing
1815 		 */
1816 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1817 			nvp->nvp_state &= ~NV_PORT_RESET;
1818 			nvp->nvp_reset_time = 0;
1819 		}
1820 
1821 		/*
1822 		 * clear the port failed flag
1823 		 */
1824 		nvp->nvp_state &= ~NV_PORT_FAILED;
1825 	}
1826 
1827 	mutex_exit(&nvp->nvp_mutex);
1828 
1829 	return (ret);
1830 }
1831 
1832 
1833 /*
1834  * Sata entry point to handle port activation.  cfgadm -c connect
1835  */
1836 static int
1837 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1838 {
1839 	int cport = sd->satadev_addr.cport;
1840 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1841 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1842 
1843 	ASSERT(cport < NV_MAX_PORTS(nvc));
1844 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1845 
1846 	mutex_enter(&nvp->nvp_mutex);
1847 
1848 	sd->satadev_state = SATA_STATE_READY;
1849 
1850 	nv_copy_registers(nvp, sd, NULL);
1851 
1852 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1853 
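	/*
	 * clear NV_PORT_INACTIVE and any other stale state flags now that
	 * the port is active again
	 */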
1854 	nvp->nvp_state = 0;
1855 
1856 	mutex_exit(&nvp->nvp_mutex);
1857 
1858 	return (SATA_SUCCESS);
1859 }
1860 
1861 
1862 /*
1863  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1864  */
1865 static int
1866 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1867 {
1868 	int cport = sd->satadev_addr.cport;
1869 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1870 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1871 
1872 	ASSERT(cport < NV_MAX_PORTS(nvc));
1873 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1874 
1875 	mutex_enter(&nvp->nvp_mutex);
1876 
1877 	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1878 
1879 	/*
1880 	 * mark the device as inaccessible
1881 	 */
	nvp->nvp_state |= NV_PORT_INACTIVE;
1883 
1884 	/*
1885 	 * disable the interrupts on port
1886 	 */
1887 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1888 
1889 	nv_uninit_port(nvp);
1890 
1891 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1892 	nv_copy_registers(nvp, sd, NULL);
1893 
1894 	mutex_exit(&nvp->nvp_mutex);
1895 
1896 	return (SATA_SUCCESS);
1897 }
1898 
1899 
1900 /*
1901  * find an empty slot in the driver's queue, increment counters,
1902  * and then invoke the appropriate PIO or DMA start routine.
1903  */
1904 static int
1905 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1906 {
1907 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1908 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1909 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1910 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1911 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1912 	nv_slot_t *nv_slotp;
1913 	boolean_t dma_cmd;
1914 
1915 	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1916 	    sata_cmdp->satacmd_cmd_reg));
1917 
1918 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1919 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1920 		nvp->nvp_ncq_run++;
1921 		/*
		 * search for an empty NCQ slot.  by this time, it's already
1923 		 * been determined by the caller that there is room on the
1924 		 * queue.
1925 		 */
1926 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1927 		    on_bit <<= 1) {
1928 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1929 				break;
1930 			}
1931 		}
1932 
1933 		/*
		 * the first empty slot found should not exceed the queue
		 * depth of the drive.  if it does, it's an error.
1936 		 */
1937 		ASSERT(slot != nvp->nvp_queue_depth);
1938 
1939 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1940 		    nvp->nvp_sactive);
1941 		ASSERT((sactive & on_bit) == 0);
1942 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1943 		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1944 		    on_bit));
1945 		nvp->nvp_sactive_cache |= on_bit;
1946 
1947 		ncq = NVSLOT_NCQ;
1948 
1949 	} else {
1950 		nvp->nvp_non_ncq_run++;
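		/*
		 * non-NCQ commands always use slot 0
		 */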
1951 		slot = 0;
1952 	}
1953 
1954 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1955 
1956 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1957 
1958 	nv_slotp->nvslot_spkt = spkt;
1959 	nv_slotp->nvslot_flags = ncq;
1960 
1961 	/*
1962 	 * the sata module doesn't indicate which commands utilize the
1963 	 * DMA engine, so find out using this switch table.
1964 	 */
1965 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1966 	case SATAC_READ_DMA_EXT:
1967 	case SATAC_WRITE_DMA_EXT:
1968 	case SATAC_WRITE_DMA:
1969 	case SATAC_READ_DMA:
1970 	case SATAC_READ_DMA_QUEUED:
1971 	case SATAC_READ_DMA_QUEUED_EXT:
1972 	case SATAC_WRITE_DMA_QUEUED:
1973 	case SATAC_WRITE_DMA_QUEUED_EXT:
1974 	case SATAC_READ_FPDMA_QUEUED:
1975 	case SATAC_WRITE_FPDMA_QUEUED:
1976 		dma_cmd = B_TRUE;
1977 		break;
1978 	default:
1979 		dma_cmd = B_FALSE;
1980 	}
1981 
1982 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1983 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1984 		nv_slotp->nvslot_start = nv_start_dma;
1985 		nv_slotp->nvslot_intr = nv_intr_dma;
1986 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1987 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1988 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1989 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1990 		if ((direction == SATA_DIR_READ) ||
1991 		    (direction == SATA_DIR_WRITE)) {
1992 			nv_slotp->nvslot_byte_count =
1993 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1994 			nv_slotp->nvslot_v_addr =
1995 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1996 			/*
1997 			 * Freeing DMA resources allocated by the framework
1998 			 * now to avoid buffer overwrite (dma sync) problems
1999 			 * when the buffer is released at command completion.
2000 			 * Primarily an issue on systems with more than
2001 			 * 4GB of memory.
2002 			 */
2003 			sata_free_dma_resources(spkt);
2004 		}
2005 	} else if (direction == SATA_DIR_NODATA_XFER) {
2006 		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2007 		nv_slotp->nvslot_start = nv_start_nodata;
2008 		nv_slotp->nvslot_intr = nv_intr_nodata;
2009 	} else if (direction == SATA_DIR_READ) {
2010 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2011 		nv_slotp->nvslot_start = nv_start_pio_in;
2012 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2013 		nv_slotp->nvslot_byte_count =
2014 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2015 		nv_slotp->nvslot_v_addr =
2016 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2017 		/*
2018 		 * Freeing DMA resources allocated by the framework now to
2019 		 * avoid buffer overwrite (dma sync) problems when the buffer
2020 		 * is released at command completion.  This is not an issue
2021 		 * for write because write does not update the buffer.
2022 		 * Primarily an issue on systems with more than 4GB of memory.
2023 		 */
2024 		sata_free_dma_resources(spkt);
2025 	} else if (direction == SATA_DIR_WRITE) {
2026 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2027 		nv_slotp->nvslot_start = nv_start_pio_out;
2028 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2029 		nv_slotp->nvslot_byte_count =
2030 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2031 		nv_slotp->nvslot_v_addr =
2032 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2033 	} else {
2034 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2035 		    " %d cookies %d cmd %x",
2036 		    sata_cmdp->satacmd_flags.sata_data_direction,
2037 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2038 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2039 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2040 
2041 		goto fail;
2042 	}
2043 
2044 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2045 	    SATA_TRAN_ACCEPTED) {
2046 #ifdef SGPIO_SUPPORT
2047 		nv_sgp_drive_active(nvp->nvp_ctlp,
2048 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2049 #endif
2050 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2051 
2052 		/*
2053 		 * start timer if it's not already running and this packet
2054 		 * is not requesting polled mode.
2055 		 */
2056 		if ((nvp->nvp_timeout_id == 0) &&
2057 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2058 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2059 			    drv_usectohz(NV_ONE_SEC));
2060 		}
2061 
2062 		return (SATA_TRAN_ACCEPTED);
2063 	}
2064 
2065 	fail:
2066 
2067 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2068 
2069 	if (ncq == NVSLOT_NCQ) {
2070 		nvp->nvp_ncq_run--;
2071 		nvp->nvp_sactive_cache &= ~on_bit;
2072 	} else {
2073 		nvp->nvp_non_ncq_run--;
2074 	}
2075 	nv_slotp->nvslot_spkt = NULL;
2076 	nv_slotp->nvslot_flags = 0;
2077 
2078 	return (ret);
2079 }
2080 
2081 
2082 /*
 * Check if the signature is ready, and if it is non-zero, translate
 * it into a Solaris sata defined device type.
2085  */
2086 static void
2087 nv_read_signature(nv_port_t *nvp)
2088 {
2089 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2090 
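	/*
	 * assemble the 32-bit device signature from the sector count and
	 * LBA task file registers left behind by the device
	 */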
2091 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2092 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2093 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2094 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2095 
2096 	switch (nvp->nvp_signature) {
2097 
2098 	case NV_SIG_DISK:
2099 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2100 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2101 		break;
2102 	case NV_SIG_ATAPI:
2103 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2104 		    "drive is an optical device"));
2105 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2106 		break;
2107 	case NV_SIG_PM:
2108 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2109 		    "device is a port multiplier"));
2110 		nvp->nvp_type = SATA_DTYPE_PMULT;
2111 		break;
2112 	case NV_SIG_NOTREADY:
2113 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2114 		    "signature not ready"));
2115 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2116 		break;
2117 	default:
2118 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2119 		    " recognized", nvp->nvp_signature);
2120 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2121 		break;
2122 	}
2123 
2124 	if (nvp->nvp_signature) {
2125 		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2126 	}
2127 }
2128 
2129 
2130 /*
2131  * Reset the port
2132  */
2133 static void
2134 nv_reset(nv_port_t *nvp)
2135 {
2136 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2137 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2138 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2139 	uint32_t sctrl;
2140 
2141 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2142 
2143 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2144 
2145 	/*
2146 	 * clear signature registers
2147 	 */
2148 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2149 	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2150 	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2151 	nv_put8(cmdhdl, nvp->nvp_count, 0);
2152 
2153 	nvp->nvp_signature = 0;
2154 	nvp->nvp_type = 0;
2155 	nvp->nvp_state |= NV_PORT_RESET;
2156 	nvp->nvp_reset_time = ddi_get_lbolt();
2157 	nvp->nvp_link_lost_time = 0;
2158 
2159 	/*
2160 	 * assert reset in PHY by writing a 1 to bit 0 scontrol
2161 	 */
2162 	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2163 
2164 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2165 
2166 	/*
2167 	 * wait 1ms
2168 	 */
2169 	drv_usecwait(1000);
2170 
2171 	/*
2172 	 * de-assert reset in PHY
2173 	 */
2174 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2175 
2176 	/*
2177 	 * make sure timer is running
2178 	 */
2179 	if (nvp->nvp_timeout_id == 0) {
2180 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2181 		    drv_usectohz(NV_ONE_SEC));
2182 	}
2183 }
2184 
2185 
2186 /*
2187  * Initialize register handling specific to mcp55
2188  */
2189 /* ARGSUSED */
2190 static void
2191 mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2192 {
2193 	nv_port_t *nvp;
2194 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2195 	uint8_t off, port;
2196 
2197 	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
2198 	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
2199 
2200 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2201 		nvp = &(nvc->nvc_port[port]);
2202 		nvp->nvp_mcp55_int_status =
2203 		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
2204 		nvp->nvp_mcp55_int_ctl =
2205 		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
2206 
2207 		/*
2208 		 * clear any previous interrupts asserted
2209 		 */
2210 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
2211 		    MCP55_INT_CLEAR);
2212 
2213 		/*
2214 		 * These are the interrupts to accept for now.  The spec
2215 		 * says these are enable bits, but nvidia has indicated
2216 		 * these are masking bits.  Even though they may be masked
2217 		 * out to prevent asserting the main interrupt, they can
2218 		 * still be asserted while reading the interrupt status
2219 		 * register, so that needs to be considered in the interrupt
2220 		 * handler.
2221 		 */
2222 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
2223 		    ~(MCP55_INT_IGNORE));
2224 	}
2225 
2226 	/*
2227 	 * Allow the driver to program the BM on the first command instead
2228 	 * of waiting for an interrupt.
2229 	 */
#ifdef NCQ
	{
		uint32_t flags;

		flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD |
		    MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
		flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
	}
#endif
2236 
2237 
2238 #if 0
2239 	/*
2240 	 * This caused problems on some but not all mcp55 based systems.
	 * DMA writes would never complete.  This happens even on small
	 * memory systems, and even when only NV_40BIT_PRD below is set
	 * without also raising buffer_dma_attr.dma_attr_addr_hi, so it
	 * seems to be a hardware issue that needs further investigation.
2245 	 */
2246 
2247 	/*
2248 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2249 	 * Enable DMA to take advantage of that.
2250 	 *
2251 	 */
2252 	if (nvc->nvc_revid >= 0xa3) {
2253 		uint32_t reg32;
2254 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
2255 		    " is capable of 40-bit addressing", nvc->nvc_revid));
2256 		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
2257 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2258 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2259 		    reg32 |NV_40BIT_PRD);
2260 	} else {
2261 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
2262 		    "not capable of 40-bit addressing", nvc->nvc_revid));
2263 	}
2264 #endif
2265 
2266 }
2267 
2268 
2269 /*
2270  * Initialize register handling specific to mcp04
2271  */
2272 static void
2273 mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2274 {
2275 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2276 	uint32_t reg32;
2277 	uint16_t reg16;
2278 	nv_port_t *nvp;
2279 	int j;
2280 
2281 	/*
2282 	 * delay hotplug interrupts until PHYRDY.
2283 	 */
2284 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2285 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2286 	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
2287 
2288 	/*
2289 	 * enable hot plug interrupts for channel x and y
2290 	 */
2291 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2292 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2293 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2294 	    NV_HIRQ_EN | reg16);
2295 
2296 
2297 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2298 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2299 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2300 	    NV_HIRQ_EN | reg16);
2301 
2302 	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
2303 
2304 	/*
2305 	 * clear any existing interrupt pending then enable
2306 	 */
2307 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2308 		nvp = &(nvc->nvc_port[j]);
2309 		mutex_enter(&nvp->nvp_mutex);
2310 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2311 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2312 		mutex_exit(&nvp->nvp_mutex);
2313 	}
2314 }
2315 
2316 
2317 /*
2318  * Initialize the controller and set up driver data structures.
2319  * determine if ck804 or mcp55 class.
2320  */
2321 static int
2322 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2323 {
2324 	struct sata_hba_tran stran;
2325 	nv_port_t *nvp;
2326 	int j, ck804 = B_TRUE;
2327 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2328 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2329 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2330 	uint32_t reg32;
2331 	uint8_t reg8, reg8_save;
2332 
2333 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2334 
2335 	/*
2336 	 * Need to set bit 2 to 1 at config offset 0x50
2337 	 * to enable access to the bar5 registers.
2338 	 */
2339 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2340 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2341 	    reg32 | NV_BAR5_SPACE_EN);
2342 
2343 	/*
	 * Determine if this is ck804 or mcp55.  ck804 maps the task file
	 * registers into bar5, while mcp55 does not.  The offset of the
	 * task file registers in mcp55's space is unused, so a write there
	 * is not retained.  Check one of the task file registers to see if
	 * it is writable and reads back what was written.  On mcp55 the
	 * value read back will not match, whereas ck804 will return the
	 * value written.
2350 	 */
2351 	reg8_save = nv_get8(bar5_hdl,
2352 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2353 
2354 
2355 	for (j = 1; j < 3; j++) {
2356 
2357 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2358 		reg8 = nv_get8(bar5_hdl,
2359 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2360 
2361 		if (reg8 != j) {
2362 			ck804 = B_FALSE;
2363 			break;
2364 		}
2365 	}
2366 
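	/*
	 * restore the original contents of the probed register
	 */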
2367 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2368 
2369 	if (ck804 == B_TRUE) {
2370 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2371 		nvc->nvc_interrupt = mcp04_intr;
2372 		nvc->nvc_reg_init = mcp04_reg_init;
2373 		nvc->nvc_set_intr = mcp04_set_intr;
2374 	} else {
2375 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2376 		nvc->nvc_interrupt = mcp55_intr;
2377 		nvc->nvc_reg_init = mcp55_reg_init;
2378 		nvc->nvc_set_intr = mcp55_set_intr;
2379 	}
2380 
2381 
2382 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2383 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2384 	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2385 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2386 	stran.sata_tran_hba_features_support =
2387 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2388 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2389 	stran.sata_tran_probe_port = nv_sata_probe;
2390 	stran.sata_tran_start = nv_sata_start;
2391 	stran.sata_tran_abort = nv_sata_abort;
2392 	stran.sata_tran_reset_dport = nv_sata_reset;
2393 	stran.sata_tran_selftest = NULL;
2394 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2395 	stran.sata_tran_pwrmgt_ops = NULL;
2396 	stran.sata_tran_ioctl = NULL;
2397 	nvc->nvc_sata_hba_tran = stran;
2398 
2399 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2400 	    KM_SLEEP);
2401 
2402 	/*
2403 	 * initialize registers common to all chipsets
2404 	 */
2405 	nv_common_reg_init(nvc);
2406 
2407 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2408 		nvp = &(nvc->nvc_port[j]);
2409 
2410 		cmd_addr = nvp->nvp_cmd_addr;
2411 		ctl_addr = nvp->nvp_ctl_addr;
2412 		bm_addr = nvp->nvp_bm_addr;
2413 
2414 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2415 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2416 
2417 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2418 
2419 		nvp->nvp_data	= cmd_addr + NV_DATA;
2420 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2421 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2422 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2423 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2424 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2425 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2426 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2427 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2428 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2429 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2430 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2431 
2432 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2433 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2434 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2435 
2436 		nvp->nvp_state = 0;
2437 	}
2438 
2439 	/*
2440 	 * initialize register by calling chip specific reg initialization
2441 	 */
2442 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2443 
2444 	return (NV_SUCCESS);
2445 }
2446 
2447 
2448 /*
2449  * Initialize data structures with enough slots to handle queuing, if
2450  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2451  * NCQ support is built into the driver and enabled.  It might have been
2452  * better to derive the true size from the drive itself, but the sata
2453  * module only sends down that information on the first NCQ command,
2454  * which means possibly re-sizing the structures on an interrupt stack,
2455  * making error handling more messy.  The easy way is to just allocate
2456  * all 32 slots, which is what most drives support anyway.
2457  */
2458 static int
2459 nv_init_port(nv_port_t *nvp)
2460 {
2461 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2462 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2463 	dev_info_t *dip = nvc->nvc_dip;
2464 	ddi_device_acc_attr_t dev_attr;
2465 	size_t buf_size;
2466 	ddi_dma_cookie_t cookie;
2467 	uint_t count;
2468 	int rc, i;
2469 
2470 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2471 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2472 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2473 
2474 	if (nvp->nvp_state & NV_PORT_INIT) {
2475 		NVLOG((NVDBG_INIT, nvc, nvp,
2476 		    "nv_init_port previously initialized"));
2477 
2478 		return (NV_SUCCESS);
2479 	} else {
2480 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2481 	}
2482 
2483 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2484 	    NV_QUEUE_SLOTS, KM_SLEEP);
2485 
2486 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2487 	    NV_QUEUE_SLOTS, KM_SLEEP);
2488 
2489 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2490 	    NV_QUEUE_SLOTS, KM_SLEEP);
2491 
2492 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2493 	    NV_QUEUE_SLOTS, KM_SLEEP);
2494 
2495 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2496 	    KM_SLEEP);
2497 
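	/*
	 * allocate and map a DMA-accessible PRD (scatter/gather) table for
	 * each command slot
	 */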
2498 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2499 
2500 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2501 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2502 
2503 		if (rc != DDI_SUCCESS) {
2504 			nv_uninit_port(nvp);
2505 
2506 			return (NV_FAILURE);
2507 		}
2508 
2509 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2510 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2511 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2512 		    &(nvp->nvp_sg_acc_hdl[i]));
2513 
2514 		if (rc != DDI_SUCCESS) {
2515 			nv_uninit_port(nvp);
2516 
2517 			return (NV_FAILURE);
2518 		}
2519 
2520 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2521 		    nvp->nvp_sg_addr[i], buf_size,
2522 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2523 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2524 
2525 		if (rc != DDI_DMA_MAPPED) {
2526 			nv_uninit_port(nvp);
2527 
2528 			return (NV_FAILURE);
2529 		}
2530 
2531 		ASSERT(count == 1);
2532 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2533 
2534 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2535 
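		/*
		 * cache the 32-bit physical address of this slot's PRD table
		 */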
2536 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2537 	}
2538 
2539 	/*
2540 	 * nvp_queue_depth represents the actual drive queue depth, not the
2541 	 * number of slots allocated in the structures (which may be more).
2542 	 * Actual queue depth is only learned after the first NCQ command, so
2543 	 * initialize it to 1 for now.
2544 	 */
2545 	nvp->nvp_queue_depth = 1;
2546 
2547 	nvp->nvp_state |= NV_PORT_INIT;
2548 
2549 	return (NV_SUCCESS);
2550 }
2551 
2552 
2553 /*
2554  * Free dynamically allocated structures for port.
2555  */
2556 static void
2557 nv_uninit_port(nv_port_t *nvp)
2558 {
2559 	int i;
2560 
2561 	/*
2562 	 * It is possible to reach here before a port has been initialized or
2563 	 * after it has already been uninitialized.  Just return in that case.
2564 	 */
2565 	if (nvp->nvp_slot == NULL) {
2566 
2567 		return;
2568 	}
2569 
2570 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2571 	    "nv_uninit_port uninitializing"));
2572 
2573 	nvp->nvp_type = SATA_DTYPE_NONE;
2574 
2575 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2576 		if (nvp->nvp_sg_paddr[i]) {
2577 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2578 		}
2579 
2580 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2581 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2582 		}
2583 
2584 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2585 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2586 		}
2587 	}
2588 
2589 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2590 	nvp->nvp_slot = NULL;
2591 
2592 	kmem_free(nvp->nvp_sg_dma_hdl,
2593 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2594 	nvp->nvp_sg_dma_hdl = NULL;
2595 
2596 	kmem_free(nvp->nvp_sg_acc_hdl,
2597 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2598 	nvp->nvp_sg_acc_hdl = NULL;
2599 
2600 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2601 	nvp->nvp_sg_addr = NULL;
2602 
2603 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2604 	nvp->nvp_sg_paddr = NULL;
2605 
2606 	nvp->nvp_state &= ~NV_PORT_INIT;
2607 	nvp->nvp_signature = 0;
2608 }
2609 
2610 
2611 /*
2612  * Cache register offsets and access handles to frequently accessed registers
2613  * which are common to either chipset.
2614  */
2615 static void
2616 nv_common_reg_init(nv_ctl_t *nvc)
2617 {
2618 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2619 	uchar_t *bm_addr_offset, *sreg_offset;
2620 	uint8_t bar, port;
2621 	nv_port_t *nvp;
2622 
2623 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2624 		if (port == 0) {
2625 			bar = NV_BAR_0;
2626 			bm_addr_offset = 0;
2627 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2628 		} else {
2629 			bar = NV_BAR_2;
2630 			bm_addr_offset = (uchar_t *)8;
2631 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2632 		}
2633 
2634 		nvp = &(nvc->nvc_port[port]);
2635 		nvp->nvp_ctlp = nvc;
2636 		nvp->nvp_port_num = port;
2637 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2638 
2639 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2640 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2641 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2642 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2643 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2644 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2645 		    (long)bm_addr_offset;
2646 
2647 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2648 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2649 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2650 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2651 	}
2652 }
2653 
2654 
2655 static void
2656 nv_uninit_ctl(nv_ctl_t *nvc)
2657 {
2658 	int port;
2659 	nv_port_t *nvp;
2660 
2661 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2662 
2663 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2664 		nvp = &(nvc->nvc_port[port]);
2665 		mutex_enter(&nvp->nvp_mutex);
2666 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2667 		nv_uninit_port(nvp);
2668 		mutex_exit(&nvp->nvp_mutex);
2669 		mutex_destroy(&nvp->nvp_mutex);
2670 		cv_destroy(&nvp->nvp_poll_cv);
2671 	}
2672 
2673 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2674 	nvc->nvc_port = NULL;
2675 }
2676 
2677 
2678 /*
2679  * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2680  * that interrupts from other devices can be disregarded while dtracing.
2681  */
2682 /* ARGSUSED */
2683 static uint_t
2684 mcp04_intr(caddr_t arg1, caddr_t arg2)
2685 {
2686 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2687 	uint8_t intr_status;
2688 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2689 
2690 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2691 
2692 	if (intr_status == 0) {
2693 
2694 		return (DDI_INTR_UNCLAIMED);
2695 	}
2696 
2697 	mcp04_intr_process(nvc, intr_status);
2698 
2699 	return (DDI_INTR_CLAIMED);
2700 }
2701 
2702 
2703 /*
2704  * Main interrupt handler for ck804.  handles normal device
2705  * interrupts as well as port hot plug and remove interrupts.
2706  *
2707  */
2708 static void
2709 mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2710 {
2711 
2712 	int port, i;
2713 	nv_port_t *nvp;
2714 	nv_slot_t *nv_slotp;
2715 	uchar_t	status;
2716 	sata_pkt_t *spkt;
2717 	uint8_t bmstatus, clear_bits;
2718 	ddi_acc_handle_t bmhdl;
2719 	int nvcleared = 0;
2720 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2721 	uint32_t sstatus;
2722 	int port_mask_hot[] = {
2723 		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2724 	};
2725 	int port_mask_pm[] = {
2726 		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2727 	};
2728 
2729 	NVLOG((NVDBG_INTR, nvc, NULL,
2730 	    "mcp04_intr_process entered intr_status=%x", intr_status));
2731 
2732 	/*
2733 	 * For command completion interrupt, explicit clear is not required.
2734 	 * however, for the error cases explicit clear is performed.
2735 	 */
2736 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2737 
2738 		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2739 
2740 		if ((port_mask[port] & intr_status) == 0) {
2741 			continue;
2742 		}
2743 
2744 		NVLOG((NVDBG_INTR, nvc, NULL,
2745 		    "mcp04_intr_process interrupt on port %d", port));
2746 
2747 		nvp = &(nvc->nvc_port[port]);
2748 
2749 		mutex_enter(&nvp->nvp_mutex);
2750 
2751 		/*
2752 		 * there was a corner case found where an interrupt
		 * arrived before nvp_slot was set.  Should probably
		 * track down why that happens, try to eliminate that
		 * source, and then get rid of this check.
2757 		 */
2758 		if (nvp->nvp_slot == NULL) {
2759 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2760 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2761 			    "received before initialization "
2762 			    "completed status=%x", status));
2763 			mutex_exit(&nvp->nvp_mutex);
2764 
2765 			/*
2766 			 * clear interrupt bits
2767 			 */
2768 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2769 			    port_mask[port]);
2770 
2771 			continue;
2772 		}
2773 
2774 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2775 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt: "
			    "no command in progress status=%x", status));
2778 			mutex_exit(&nvp->nvp_mutex);
2779 
2780 			/*
2781 			 * clear interrupt bits
2782 			 */
2783 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2784 			    port_mask[port]);
2785 
2786 			continue;
2787 		}
2788 
2789 		bmhdl = nvp->nvp_bm_hdl;
2790 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2791 
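		/*
		 * skip this port if the bus master status does not show an
		 * interrupt pending
		 */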
2792 		if (!(bmstatus & BMISX_IDEINTS)) {
2793 			mutex_exit(&nvp->nvp_mutex);
2794 
2795 			continue;
2796 		}
2797 
2798 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2799 
2800 		if (status & SATA_STATUS_BSY) {
2801 			mutex_exit(&nvp->nvp_mutex);
2802 
2803 			continue;
2804 		}
2805 
2806 		nv_slotp = &(nvp->nvp_slot[0]);
2807 
2808 		ASSERT(nv_slotp);
2809 
2810 		spkt = nv_slotp->nvslot_spkt;
2811 
2812 		if (spkt == NULL) {
2813 			mutex_exit(&nvp->nvp_mutex);
2814 
2815 			continue;
2816 		}
2817 
2818 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2819 
2820 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2821 
2822 		/*
		 * If there is no link, the completion of the packet cannot
		 * be determined with certainty, so abort it.
2825 		 */
2826 		if (nv_check_link((&spkt->satapkt_device)->
2827 		    satadev_scr.sstatus) == B_FALSE) {
2828 
2829 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2830 
2831 		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2832 
2833 			nv_complete_io(nvp, spkt, 0);
2834 		}
2835 
2836 		mutex_exit(&nvp->nvp_mutex);
2837 	}
2838 
2839 	/*
2840 	 * mcp04 often doesn't correctly distinguish hot add/remove
2841 	 * interrupts.  Frequently both the ADD and the REMOVE bits
2842 	 * are asserted, whether it was a remove or add.  Use sstatus
2843 	 * to distinguish hot add from hot remove.
2844 	 */
2845 
2846 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2847 		clear_bits = 0;
2848 
2849 		nvp = &(nvc->nvc_port[port]);
2850 		mutex_enter(&nvp->nvp_mutex);
2851 
2852 		if ((port_mask_pm[port] & intr_status) != 0) {
2853 			clear_bits = port_mask_pm[port];
2854 			NVLOG((NVDBG_HOT, nvc, nvp,
2855 			    "clearing PM interrupt bit: %x",
2856 			    intr_status & port_mask_pm[port]));
2857 		}
2858 
2859 		if ((port_mask_hot[port] & intr_status) == 0) {
2860 			if (clear_bits != 0) {
2861 				goto clear;
2862 			} else {
2863 				mutex_exit(&nvp->nvp_mutex);
2864 				continue;
2865 			}
2866 		}
2867 
2868 		/*
2869 		 * reaching here means there was a hot add or remove.
2870 		 */
2871 		clear_bits |= port_mask_hot[port];
2872 
2873 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2874 
2875 		sstatus = nv_get32(bar5_hdl,
2876 		    nvc->nvc_port[port].nvp_sstatus);
2877 
2878 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2879 		    SSTATUS_DET_DEVPRE_PHYCOM) {
2880 			nv_report_add_remove(nvp, 0);
2881 		} else {
2882 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2883 		}
2884 	clear:
2885 		/*
2886 		 * clear interrupt bits.  explicit interrupt clear is
2887 		 * required for hotplug interrupts.
2888 		 */
2889 		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2890 
2891 		/*
2892 		 * make sure it's flushed and cleared.  If not try
2893 		 * again.  Sometimes it has been observed to not clear
2894 		 * on the first try.
2895 		 */
2896 		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2897 
2898 		/*
2899 		 * make 10 additional attempts to clear the interrupt
2900 		 */
2901 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2902 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2903 			    "still not clear try=%d", intr_status,
2904 			    ++nvcleared));
2905 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2906 			    clear_bits);
2907 			intr_status = nv_get8(bar5_hdl,
2908 			    nvc->nvc_mcp04_int_status);
2909 		}
2910 
2911 		/*
2912 		 * if still not clear, log a message and disable the
2913 		 * port. highly unlikely that this path is taken, but it
2914 		 * gives protection against a wedged interrupt.
2915 		 */
2916 		if (intr_status & clear_bits) {
2917 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2918 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2919 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2920 			nvp->nvp_state |= NV_PORT_FAILED;
2921 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2922 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2923 			    "interrupt.  disabling port intr_status=%X",
2924 			    intr_status);
2925 		}
2926 
2927 		mutex_exit(&nvp->nvp_mutex);
2928 	}
2929 }
2930 
2931 
2932 /*
2933  * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2934  * on the controller, to handle completion and hot plug and remove events.
2935  *
2936  */
2937 static uint_t
2938 mcp55_intr_port(nv_port_t *nvp)
2939 {
2940 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2941 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2942 	uint8_t clear = 0, intr_cycles = 0;
2943 	int ret = DDI_INTR_UNCLAIMED;
2944 	uint16_t int_status;
2945 
2946 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2947 
2948 	for (;;) {
2949 		/*
2950 		 * read current interrupt status
2951 		 */
2952 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2953 
2954 		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2955 
2956 		/*
2957 		 * MCP55_INT_IGNORE interrupts will show up in the status,
2958 		 * but are masked out from causing an interrupt to be generated
2959 		 * to the processor.  Ignore them here by masking them out.
2960 		 */
2961 		int_status &= ~(MCP55_INT_IGNORE);
2962 
2963 		/*
2964 		 * exit the loop when no more interrupts to process
2965 		 */
2966 		if (int_status == 0) {
2967 
2968 			break;
2969 		}
2970 
2971 		if (int_status & MCP55_INT_COMPLETE) {
2972 			NVLOG((NVDBG_INTR, nvc, nvp,
2973 			    "mcp55_packet_complete_intr"));
2974 			/*
2975 			 * since int_status was set, return DDI_INTR_CLAIMED
2976 			 * from the DDI's perspective even though the packet
2977 			 * completion may not have succeeded.  If it fails,
2978 			 * need to manually clear the interrupt, otherwise
2979 			 * clearing is implicit.
2980 			 */
2981 			ret = DDI_INTR_CLAIMED;
2982 			if (mcp55_packet_complete_intr(nvc, nvp) ==
2983 			    NV_FAILURE) {
2984 				clear = MCP55_INT_COMPLETE;
2985 			} else {
2986 				intr_cycles = 0;
2987 			}
2988 		}
2989 
2990 		if (int_status & MCP55_INT_DMA_SETUP) {
2991 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
2992 
2993 			/*
2994 			 * Needs to be cleared before starting the BM, so do it
2995 			 * now.  make sure this is still working.
2996 			 */
2997 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
2998 			    MCP55_INT_DMA_SETUP);
2999 #ifdef NCQ
3000 			ret = mcp55_dma_setup_intr(nvc, nvp);
3001 #endif
3002 		}
3003 
3004 		if (int_status & MCP55_INT_REM) {
3005 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
3006 			clear = MCP55_INT_REM;
3007 			ret = DDI_INTR_CLAIMED;
3008 
3009 			mutex_enter(&nvp->nvp_mutex);
3010 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3011 			mutex_exit(&nvp->nvp_mutex);
3012 
3013 		} else if (int_status & MCP55_INT_ADD) {
3014 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
3015 			clear = MCP55_INT_ADD;
3016 			ret = DDI_INTR_CLAIMED;
3017 
3018 			mutex_enter(&nvp->nvp_mutex);
3019 			nv_report_add_remove(nvp, 0);
3020 			mutex_exit(&nvp->nvp_mutex);
3021 		}
3022 
3023 		if (clear) {
3024 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
3025 			clear = 0;
3026 		}
3027 
3028 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3029 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3030 			    "processing.  Disabling port int_status=%X"
3031 			    " clear=%X", int_status, clear);
3032 			mutex_enter(&nvp->nvp_mutex);
3033 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3034 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3035 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3036 			nvp->nvp_state |= NV_PORT_FAILED;
3037 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3038 			mutex_exit(&nvp->nvp_mutex);
3039 		}
3040 	}
3041 
3042 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
3043 
3044 	return (ret);
3045 }
3046 
3047 
3048 /* ARGSUSED */
3049 static uint_t
3050 mcp55_intr(caddr_t arg1, caddr_t arg2)
3051 {
3052 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3053 	int ret;
3054 
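	/*
	 * process both ports; the interrupt is claimed if either
	 * port claims it
	 */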
3055 	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
3056 	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
3057 
3058 	return (ret);
3059 }
3060 
3061 
3062 #ifdef NCQ
3063 /*
3064  * with software driven NCQ on mcp55, an interrupt occurs right
3065  * before the drive is ready to do a DMA transfer.  At this point,
3066  * the PRD table needs to be programmed and the DMA engine enabled
3067  * and ready to go.
3068  *
3069  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3070  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3071  * -- clear bit 0 of master command reg
3072  * -- program PRD
3073  * -- clear the interrupt status bit for the DMA Setup FIS
3074  * -- set bit 0 of the bus master command register
3075  */
3076 static int
3077 mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3078 {
3079 	int slot;
3080 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3081 	uint8_t bmicx;
3082 	int port = nvp->nvp_port_num;
3083 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3084 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3085 
3086 	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this should not be executed at all until NCQ is enabled");
3088 
3089 	mutex_enter(&nvp->nvp_mutex);
3090 
3091 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
3092 
3093 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3094 
3095 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
3096 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3097 
3098 	/*
3099 	 * halt the DMA engine.  This step is necessary according to
3100 	 * the mcp55 spec, probably since there may have been a "first" packet
3101 	 * that already programmed the DMA engine, but may not turn out to
3102 	 * be the first one processed.
3103 	 */
3104 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3105 
3106 #if 0
3107 	if (bmicx & BMICX_SSBM) {
3108 		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3109 		    "another packet.  Cancelling and reprogramming"));
3110 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3111 	}
3112 #endif
3113 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3114 
3115 	nv_start_dma_engine(nvp, slot);
3116 
3117 	mutex_exit(&nvp->nvp_mutex);
3118 
3119 	return (DDI_INTR_CLAIMED);
3120 }
3121 #endif /* NCQ */
3122 
3123 
3124 /*
3125  * packet completion interrupt.  If the packet is complete, invoke
3126  * the packet completion callback.
3127  */
3128 static int
3129 mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3130 {
3131 	uint8_t status, bmstatus;
3132 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3133 	int sactive;
3134 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3135 	sata_pkt_t *spkt;
3136 	nv_slot_t *nv_slotp;
3137 
3138 	mutex_enter(&nvp->nvp_mutex);
3139 
3140 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3141 
3142 	if (!(bmstatus & BMISX_IDEINTS)) {
3143 		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3144 		mutex_exit(&nvp->nvp_mutex);
3145 
3146 		return (NV_FAILURE);
3147 	}
3148 
3149 	/*
3150 	 * If the just completed item is a non-ncq command, the busy
3151 	 * bit should not be set
3152 	 */
3153 	if (nvp->nvp_non_ncq_run) {
3154 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3155 		if (status & SATA_STATUS_BSY) {
3156 			nv_cmn_err(CE_WARN, nvc, nvp,
3157 			    "unexpected SATA_STATUS_BSY set");
3158 			mutex_exit(&nvp->nvp_mutex);
3159 			/*
3160 			 * calling function will clear interrupt.  then
3161 			 * the real interrupt will either arrive or the
3162 			 * packet timeout handling will take over and
3163 			 * reset.
3164 			 */
3165 			return (NV_FAILURE);
3166 		}
3167 
3168 	} else {
3169 		/*
		 * For NCQ, check for BSY here and wait if it is still set
		 * before continuing.  Rather than waiting for it to clear
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediately, but it might have to spin
		 * here for a bit.  Needs more work and experimentation.
3175 		 */
3176 		ASSERT(nvp->nvp_ncq_run);
3177 	}
3178 
3179 
3180 	if (nvp->nvp_ncq_run) {
3181 		ncq_command = B_TRUE;
3182 		ASSERT(nvp->nvp_non_ncq_run == 0);
3183 	} else {
3184 		ASSERT(nvp->nvp_non_ncq_run != 0);
3185 	}
3186 
3187 	/*
3188 	 * active_pkt_bit will represent the bitmap of the single completed
3189 	 * packet.  Because of the nature of sw assisted NCQ, only one
3190 	 * command will complete per interrupt.
3191 	 */
3192 
3193 	if (ncq_command == B_FALSE) {
3194 		active_pkt = 0;
3195 	} else {
3196 		/*
3197 		 * NCQ: determine which command just completed, by examining
3198 		 * which bit cleared in the register since last written.
3199 		 */
3200 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3201 
3202 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3203 
3204 		ASSERT(active_pkt_bit);
3205 
3206 
3207 		/*
3208 		 * this failure path needs more work to handle the
3209 		 * error condition and recovery.
3210 		 */
3211 		if (active_pkt_bit == 0) {
3212 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3213 
3214 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3215 			    "nvp->nvp_sactive %X", sactive,
3216 			    nvp->nvp_sactive_cache);
3217 
3218 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3219 
3220 			mutex_exit(&nvp->nvp_mutex);
3221 
3222 			return (NV_FAILURE);
3223 		}
3224 
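		/*
		 * convert the single set bit into its slot index
		 */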
3225 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3226 		    active_pkt++, active_pkt_bit >>= 1) {
3227 		}
3228 
3229 		/*
3230 		 * make sure only one bit is ever turned on
3231 		 */
3232 		ASSERT(active_pkt_bit == 1);
3233 
3234 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3235 	}
3236 
3237 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3238 
3239 	spkt = nv_slotp->nvslot_spkt;
3240 
3241 	ASSERT(spkt != NULL);
3242 
3243 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3244 
3245 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3246 
3247 	/*
	 * If there is no link, the completion of the packet cannot be
	 * determined with certainty, so abort it.
3250 	 */
3251 	if (nv_check_link((&spkt->satapkt_device)->
3252 	    satadev_scr.sstatus) == B_FALSE) {
3253 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3254 
3255 	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3256 
3257 		nv_complete_io(nvp, spkt, active_pkt);
3258 	}
3259 
3260 	mutex_exit(&nvp->nvp_mutex);
3261 
3262 	return (NV_SUCCESS);
3263 }
3264 
3265 
3266 static void
3267 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3268 {
3269 
3270 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3271 
3272 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3273 		nvp->nvp_ncq_run--;
3274 	} else {
3275 		nvp->nvp_non_ncq_run--;
3276 	}
3277 
3278 	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so that the slot is available again as soon
	 * as the completion callback runs.
3281 	 */
3282 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3283 
3284 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3285 		/*
		 * If this is not a polled mode command, which has an
		 * active thread monitoring for completion, then signal
		 * the sleeping thread that the command is complete.
3289 		 */
3290 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3291 			cv_signal(&nvp->nvp_poll_cv);
3292 		}
3293 
3294 		return;
3295 	}
3296 
3297 	if (spkt->satapkt_comp != NULL) {
3298 		mutex_exit(&nvp->nvp_mutex);
3299 		(*spkt->satapkt_comp)(spkt);
3300 		mutex_enter(&nvp->nvp_mutex);
3301 	}
3302 }
3303 
3304 
3305 /*
 * check whether the packet is an ncq command or not.  for an ncq command,
 * start it if there is still room on the queue.  for a non-ncq command,
 * only start it if no other command is running.
3309  */
3310 static int
3311 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3312 {
3313 	uint8_t cmd, ncq;
3314 
3315 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3316 
3317 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3318 
3319 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3320 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3321 
3322 	if (ncq == B_FALSE) {
3323 
3324 		if ((nvp->nvp_non_ncq_run == 1) ||
3325 		    (nvp->nvp_ncq_run > 0)) {
3326 			/*
3327 			 * next command is non-ncq which can't run
3328 			 * concurrently.  exit and return queue full.
3329 			 */
3330 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3331 
3332 			return (SATA_TRAN_QUEUE_FULL);
3333 		}
3334 
3335 		return (nv_start_common(nvp, spkt));
3336 	}
3337 
3338 	/*
3339 	 * ncq == B_TRUE
3340 	 */
3341 	if (nvp->nvp_non_ncq_run == 1) {
3342 		/*
3343 		 * cannot start any NCQ commands when there
3344 		 * is a non-NCQ command running.
3345 		 */
3346 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3347 
3348 		return (SATA_TRAN_QUEUE_FULL);
3349 	}
3350 
3351 #ifdef NCQ
3352 	/*
3353 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3354 	 * is being pulled out until NCQ support is later addressed
3355 	 *
3356 	 * nvp_queue_depth is initialized by the first NCQ command
3357 	 * received.
3358 	 */
3359 	if (nvp->nvp_queue_depth == 1) {
3360 		nvp->nvp_queue_depth =
3361 		    spkt->satapkt_device.satadev_qdepth;
3362 
3363 		ASSERT(nvp->nvp_queue_depth > 1);
3364 
3365 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3366 		    "nv_process_queue: nvp_queue_depth set to %d",
3367 		    nvp->nvp_queue_depth));
3368 	}
3369 #endif
3370 
3371 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3372 		/*
3373 		 * max number of NCQ commands already active
3374 		 */
3375 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3376 
3377 		return (SATA_TRAN_QUEUE_FULL);
3378 	}
3379 
3380 	return (nv_start_common(nvp, spkt));
3381 }
3382 
3383 
3384 /*
3385  * configure INTx and legacy interrupts
3386  */
3387 static int
3388 nv_add_legacy_intrs(nv_ctl_t *nvc)
3389 {
3390 	dev_info_t	*devinfo = nvc->nvc_dip;
3391 	int		actual, count = 0;
3392 	int		x, y, rc, inum = 0;
3393 
3394 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3395 
3396 	/*
3397 	 * get number of interrupts
3398 	 */
3399 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3400 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3401 		NVLOG((NVDBG_INTR, nvc, NULL,
3402 		    "ddi_intr_get_nintrs() failed, "
3403 		    "rc %d count %d", rc, count));
3404 
3405 		return (DDI_FAILURE);
3406 	}
3407 
3408 	/*
3409 	 * allocate an array of interrupt handles
3410 	 */
3411 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3412 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3413 
3414 	/*
3415 	 * call ddi_intr_alloc()
3416 	 */
3417 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3418 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3419 
3420 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3421 		nv_cmn_err(CE_WARN, nvc, NULL,
3422 		    "ddi_intr_alloc() failed, rc %d", rc);
3423 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3424 
3425 		return (DDI_FAILURE);
3426 	}
3427 
3428 	if (actual < count) {
3429 		nv_cmn_err(CE_WARN, nvc, NULL,
3430 		    "ddi_intr_alloc: requested: %d, received: %d",
3431 		    count, actual);
3432 
3433 		goto failure;
3434 	}
3435 
3436 	nvc->nvc_intr_cnt = actual;
3437 
3438 	/*
3439 	 * get intr priority
3440 	 */
3441 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3442 	    DDI_SUCCESS) {
3443 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3444 
3445 		goto failure;
3446 	}
3447 
3448 	/*
3449 	 * Test for high level mutex
3450 	 */
3451 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3452 		nv_cmn_err(CE_WARN, nvc, NULL,
3453 		    "nv_add_legacy_intrs: high level intr not supported");
3454 
3455 		goto failure;
3456 	}
3457 
3458 	for (x = 0; x < actual; x++) {
3459 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3460 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3461 			nv_cmn_err(CE_WARN, nvc, NULL,
3462 			    "ddi_intr_add_handler() failed");
3463 
3464 			goto failure;
3465 		}
3466 	}
3467 
3468 	/*
3469 	 * call ddi_intr_enable() for legacy interrupts
3470 	 */
3471 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3472 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3473 	}
3474 
3475 	return (DDI_SUCCESS);
3476 
3477 	failure:
3478 	/*
3479 	 * free allocated intr and nvc_htable
3480 	 */
3481 	for (y = 0; y < actual; y++) {
3482 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3483 	}
3484 
3485 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3486 
3487 	return (DDI_FAILURE);
3488 }
3489 
3490 #ifdef	NV_MSI_SUPPORTED
3491 /*
3492  * configure MSI interrupts
3493  */
3494 static int
3495 nv_add_msi_intrs(nv_ctl_t *nvc)
3496 {
3497 	dev_info_t	*devinfo = nvc->nvc_dip;
3498 	int		count, avail, actual;
3499 	int		x, y, rc, inum = 0;
3500 
3501 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3502 
3503 	/*
3504 	 * get number of interrupts
3505 	 */
3506 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3507 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3508 		nv_cmn_err(CE_WARN, nvc, NULL,
3509 		    "ddi_intr_get_nintrs() failed, "
3510 		    "rc %d count %d", rc, count);
3511 
3512 		return (DDI_FAILURE);
3513 	}
3514 
3515 	/*
3516 	 * get number of available interrupts
3517 	 */
3518 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3519 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3520 		nv_cmn_err(CE_WARN, nvc, NULL,
3521 		    "ddi_intr_get_navail() failed, "
3522 		    "rc %d avail %d", rc, avail);
3523 
3524 		return (DDI_FAILURE);
3525 	}
3526 
3527 	if (avail < count) {
3528 		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3530 		    avail, count);
3531 	}
3532 
3533 	/*
3534 	 * allocate an array of interrupt handles
3535 	 */
3536 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3537 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3538 
3539 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3540 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3541 
3542 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3543 		nv_cmn_err(CE_WARN, nvc, NULL,
3544 		    "ddi_intr_alloc() failed, rc %d", rc);
3545 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3546 
3547 		return (DDI_FAILURE);
3548 	}
3549 
3550 	/*
3551 	 * Use interrupt count returned or abort?
3552 	 */
3553 	if (actual < count) {
3554 		NVLOG((NVDBG_INIT, nvc, NULL,
3555 		    "Requested: %d, Received: %d", count, actual));
3556 	}
3557 
3558 	nvc->nvc_intr_cnt = actual;
3559 
3560 	/*
3561 	 * get priority for first msi, assume remaining are all the same
3562 	 */
3563 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3564 	    DDI_SUCCESS) {
3565 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3566 
3567 		goto failure;
3568 	}
3569 
3570 	/*
3571 	 * test for high level mutex
3572 	 */
3573 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3574 		nv_cmn_err(CE_WARN, nvc, NULL,
3575 		    "nv_add_msi_intrs: high level intr not supported");
3576 
3577 		goto failure;
3578 	}
3579 
3580 	/*
3581 	 * Call ddi_intr_add_handler()
3582 	 */
3583 	for (x = 0; x < actual; x++) {
3584 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3585 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3586 			nv_cmn_err(CE_WARN, nvc, NULL,
3587 			    "ddi_intr_add_handler() failed");
3588 
3589 			goto failure;
3590 		}
3591 	}
3592 
3593 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3594 
3595 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3596 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3597 		    nvc->nvc_intr_cnt);
3598 	} else {
3599 		/*
3600 		 * Call ddi_intr_enable() for MSI non block enable
3601 		 */
3602 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3603 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3604 		}
3605 	}
3606 
3607 	return (DDI_SUCCESS);
3608 
3609 	failure:
3610 	/*
3611 	 * free allocated intr and nvc_htable
3612 	 */
3613 	for (y = 0; y < actual; y++) {
3614 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3615 	}
3616 
3617 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3618 
3619 	return (DDI_FAILURE);
3620 }
3621 #endif
3622 
3623 
3624 static void
3625 nv_rem_intrs(nv_ctl_t *nvc)
3626 {
3627 	int x, i;
3628 	nv_port_t *nvp;
3629 
3630 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3631 
3632 	/*
3633 	 * prevent controller from generating interrupts by
3634 	 * masking them out.  This is an extra precaution.
3635 	 */
3636 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3637 		nvp = (&nvc->nvc_port[i]);
3638 		mutex_enter(&nvp->nvp_mutex);
3639 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3640 		mutex_exit(&nvp->nvp_mutex);
3641 	}
3642 
3643 	/*
3644 	 * disable all interrupts
3645 	 */
3646 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3647 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3648 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3649 		    nvc->nvc_intr_cnt);
3650 	} else {
3651 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3652 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3653 		}
3654 	}
3655 
3656 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3657 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3658 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3659 	}
3660 
3661 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3662 }
3663 
3664 
3665 /*
3666  * variable argument wrapper for cmn_err.  prefixes the instance and port
3667  * number if possible
3668  */
3669 static void
3670 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3671 {
3672 	char port[NV_STRING_10];
3673 	char inst[NV_STRING_10];
3674 
3675 	mutex_enter(&nv_log_mutex);
3676 
3677 	if (nvc) {
3678 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3679 		    ddi_get_instance(nvc->nvc_dip));
3680 	} else {
3681 		inst[0] = '\0';
3682 	}
3683 
3684 	if (nvp) {
3685 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3686 	} else {
3687 		port[0] = '\0';
3688 	}
3689 
3690 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3691 	    (inst[0]|port[0] ? ": " :""));
3692 
3693 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3694 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3695 
3696 	/*
3697 	 * normally set to log to console but in some debug situations it
3698 	 * may be useful to log only to a file.
3699 	 */
3700 	if (nv_log_to_console) {
3701 		if (nv_prom_print) {
3702 			prom_printf("%s\n", nv_log_buf);
3703 		} else {
3704 			cmn_err(ce, "%s", nv_log_buf);
		}
	} else {
3709 		cmn_err(ce, "!%s", nv_log_buf);
3710 	}
3711 
3712 	mutex_exit(&nv_log_mutex);
3713 }
3714 
3715 
3716 /*
3717  * wrapper for cmn_err
3718  */
3719 static void
3720 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3721 {
3722 	va_list ap;
3723 
3724 	va_start(ap, fmt);
3725 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3726 	va_end(ap);
3727 }
3728 
3729 
3730 #if defined(DEBUG)
3731 /*
3732  * prefixes the instance and port number if possible to the debug message
3733  */
3734 static void
3735 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3736 {
3737 	va_list ap;
3738 
3739 	if ((nv_debug_flags & flag) == 0) {
3740 		return;
3741 	}
3742 
3743 	va_start(ap, fmt);
3744 	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3745 	va_end(ap);
3746 
3747 	/*
3748 	 * useful for some debugging situations
3749 	 */
3750 	if (nv_log_delay) {
3751 		drv_usecwait(nv_log_delay);
3752 	}
3753 
3754 }
3755 #endif /* DEBUG */
3756 
3757 
3758 /*
3759  * program registers which are common to all commands
3760  */
3761 static void
3762 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3763 {
3764 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3765 	sata_pkt_t *spkt;
3766 	sata_cmd_t *satacmd;
3767 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3768 	uint8_t cmd, ncq = B_FALSE;
3769 
3770 	spkt = nv_slotp->nvslot_spkt;
3771 	satacmd = &spkt->satapkt_cmd;
3772 	cmd = satacmd->satacmd_cmd_reg;
3773 
3774 	ASSERT(nvp->nvp_slot);
3775 
3776 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3777 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3778 		ncq = B_TRUE;
3779 	}
3780 
3781 	/*
3782 	 * select the drive
3783 	 */
3784 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3785 
3786 	/*
	 * make certain the drive is selected
3788 	 */
3789 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3790 	    NV_SEC2USEC(5), 0) == B_FALSE) {
3791 
3792 		return;
3793 	}
3794 
3795 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3796 
3797 	case ATA_ADDR_LBA:
3798 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3799 
3800 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3801 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3802 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3803 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3804 
3805 		break;
3806 
3807 	case ATA_ADDR_LBA28:
3808 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3809 		    "ATA_ADDR_LBA28 mode"));
3810 		/*
3811 		 * NCQ only uses 48-bit addressing
3812 		 */
3813 		ASSERT(ncq != B_TRUE);
3814 
3815 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3816 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3817 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3818 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3819 
3820 		break;
3821 
3822 	case ATA_ADDR_LBA48:
3823 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3824 		    "ATA_ADDR_LBA48 mode"));
3825 
3826 		/*
3827 		 * for NCQ, tag goes into count register and real sector count
3828 		 * into features register.  The sata module does the translation
3829 		 * in the satacmd.
3830 		 */
3831 		if (ncq == B_TRUE) {
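			/*
			 * the NCQ tag occupies bits 7:3 of the sector count
			 * register, hence the shift by 3
			 */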
3832 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3833 			nv_put8(cmdhdl, nvp->nvp_feature,
3834 			    satacmd->satacmd_features_reg_ext);
3835 			nv_put8(cmdhdl, nvp->nvp_feature,
3836 			    satacmd->satacmd_features_reg);
3837 		} else {
3838 			nv_put8(cmdhdl, nvp->nvp_count,
3839 			    satacmd->satacmd_sec_count_msb);
3840 			nv_put8(cmdhdl, nvp->nvp_count,
3841 			    satacmd->satacmd_sec_count_lsb);
3842 		}
3843 
		/*
		 * send the high-order half first; the 48-bit taskfile
		 * registers are two bytes deep, so the first write to each
		 * register latches the high-order byte and the second write
		 * latches the low-order byte
		 */
3847 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3848 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3849 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3850 		/*
3851 		 * Send the low-order half
3852 		 */
3853 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3854 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3855 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3856 
3857 		break;
3858 
3859 	case 0:
3860 		/*
3861 		 * non-media access commands such as identify and features
3862 		 * take this path.
3863 		 */
3864 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3865 		nv_put8(cmdhdl, nvp->nvp_feature,
3866 		    satacmd->satacmd_features_reg);
3867 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3868 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3869 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3870 
3871 		break;
3872 
3873 	default:
3874 		break;
3875 	}
3876 
3877 	ASSERT(nvp->nvp_slot);
3878 }
3879 
3880 
3881 /*
3882  * start a command that involves no media access
3883  */
3884 static int
3885 nv_start_nodata(nv_port_t *nvp, int slot)
3886 {
3887 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3888 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3889 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3890 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3891 
3892 	nv_program_taskfile_regs(nvp, slot);
3893 
3894 	/*
3895 	 * This next one sets the controller in motion
3896 	 */
3897 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3898 
3899 	return (SATA_TRAN_ACCEPTED);
3900 }
3901 
3902 
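/*
 * Clear the bus master interrupt and error status latches for this port.
 * In the standard bus master IDE (SFF-8038i) programming model these bits
 * are write-one-to-clear, so the current status is read, the latch bits are
 * set in the value written back (clearing them in hardware) while the other
 * bits are preserved, and the original (pre-clear) status is returned.
 */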
3903 int
3904 nv_bm_status_clear(nv_port_t *nvp)
3905 {
3906 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3907 	uchar_t	status, ret;
3908 
3909 	/*
3910 	 * Get the current BM status
3911 	 */
3912 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3913 
3914 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3915 
3916 	/*
3917 	 * Clear the latches (and preserve the other bits)
3918 	 */
3919 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3920 
3921 	return (ret);
3922 }
3923 
3924 
3925 /*
3926  * program the bus master DMA engine with the PRD address for
3927  * the active slot command, and start the DMA engine.
3928  */
3929 static void
3930 nv_start_dma_engine(nv_port_t *nvp, int slot)
3931 {
3932 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3933 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3934 	uchar_t direction;
3935 
3936 	ASSERT(nv_slotp->nvslot_spkt != NULL);
3937 
3938 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3939 	    == SATA_DIR_READ) {
3940 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3941 	} else {
3942 		direction = BMICX_RWCON_READ_FROM_MEMORY;
3943 	}
3944 
3945 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3946 	    "nv_start_dma_engine entered"));
3947 
3948 	/*
3949 	 * reset the controller's interrupt and error status bits
3950 	 */
3951 	(void) nv_bm_status_clear(nvp);
3952 
3953 	/*
3954 	 * program the PRD table physical start address
3955 	 */
3956 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3957 
3958 	/*
3959 	 * set the direction control and start the DMA controller
3960 	 */
3961 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3962 }
3963 
3964 /*
3965  * start dma command, either in or out
3966  */
3967 static int
3968 nv_start_dma(nv_port_t *nvp, int slot)
3969 {
3970 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3971 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3972 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3973 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3974 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3975 #ifdef NCQ
3976 	uint8_t ncq = B_FALSE;
3977 #endif
3978 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3979 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3980 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3981 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3982 
3983 	ASSERT(sg_count != 0);
3984 
3985 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3986 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3987 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3988 		    sata_cmdp->satacmd_num_dma_cookies);
3989 
3990 		return (NV_FAILURE);
3991 	}
3992 
3993 	nv_program_taskfile_regs(nvp, slot);
3994 
3995 	/*
3996 	 * start the drive in motion
3997 	 */
3998 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
3999 
4000 	/*
	 * the drive starts processing the transaction when the cmd register
	 * is written.  This is done here, before programming the DMA engine,
	 * to overlap the two operations and save some time.  If the drive is
	 * ready before the DMA engine is set up, it simply waits.
4005 	 */
4006 #ifdef NCQ
4007 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4008 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4009 		ncq = B_TRUE;
4010 	}
4011 #endif
4012 
4013 	/*
4014 	 * copy the PRD list to PRD table in DMA accessible memory
4015 	 * so that the controller can access it.
4016 	 */
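	/*
	 * Each PRD entry is two 32-bit words: the first holds the physical
	 * base address of the buffer; the second holds the byte count in its
	 * low 16 bits (with high address bits folded into bits 16-23 for
	 * 40-bit addresses, as handled below) and the end-of-table flag
	 * (PRDE_EOT) set on the last entry.
	 */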
4017 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4018 		uint32_t size;
4019 
4020 		ASSERT(srcp->dmac_size <= UINT16_MAX);
4021 
4022 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4023 
4024 		size = srcp->dmac_size;
4025 
4026 		/*
		 * If this is a 40-bit address, copy bits 32-39 of the
		 * physical address into bits 16-23 of the PRD count.
4029 		 */
4030 		if (srcp->dmac_laddress > UINT32_MAX) {
4031 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4032 		}
4033 
4034 		/*
4035 		 * set the end of table flag for the last entry
4036 		 */
4037 		if (idx == (sg_count - 1)) {
4038 			size |= PRDE_EOT;
4039 		}
4040 
4041 		nv_put32(sghdl, dstp++, size);
4042 	}
4043 
4044 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4045 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4046 
4047 	nv_start_dma_engine(nvp, slot);
4048 
4049 #ifdef NCQ
4050 	/*
4051 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4052 	 * command running.  Preliminary NCQ efforts indicated this needs
4053 	 * more debugging.
4054 	 *
4055 	 * if (nvp->nvp_ncq_run <= 1)
4056 	 */
4057 
4058 	if (ncq == B_FALSE) {
4059 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4060 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4061 		    " cmd = %X", non_ncq_commands++, cmd));
4062 		nv_start_dma_engine(nvp, slot);
4063 	} else {
4064 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4065 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4066 	}
4067 #endif /* NCQ */
4068 
4069 	return (SATA_TRAN_ACCEPTED);
4070 }
4071 
4072 
4073 /*
4074  * start a PIO data-in ATA command
4075  */
4076 static int
4077 nv_start_pio_in(nv_port_t *nvp, int slot)
4078 {
4079 
4080 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4081 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4082 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4083 
4084 	nv_program_taskfile_regs(nvp, slot);
4085 
4086 	/*
4087 	 * This next one sets the drive in motion
4088 	 */
4089 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4090 
4091 	return (SATA_TRAN_ACCEPTED);
4092 }
4093 
4094 
4095 /*
4096  * start a PIO data-out ATA command
4097  */
4098 static int
4099 nv_start_pio_out(nv_port_t *nvp, int slot)
4100 {
4101 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4102 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4103 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4104 
4105 	nv_program_taskfile_regs(nvp, slot);
4106 
4107 	/*
4108 	 * this next one sets the drive in motion
4109 	 */
4110 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4111 
4112 	/*
4113 	 * wait for the busy bit to settle
4114 	 */
4115 	NV_DELAY_NSEC(400);
4116 
4117 	/*
4118 	 * wait for the drive to assert DRQ to send the first chunk
4119 	 * of data. Have to busy wait because there's no interrupt for
4120 	 * the first chunk. This is bad... uses a lot of cycles if the
4121 	 * drive responds too slowly or if the wait loop granularity
4122 	 * is too large. It's even worse if the drive is defective and
4123 	 * the loop times out.
4124 	 */
4125 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4126 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4127 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4128 	    4000000, 0) == B_FALSE) {
4129 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4130 
4131 		goto error;
4132 	}
4133 
4134 	/*
4135 	 * send the first block.
4136 	 */
4137 	nv_intr_pio_out(nvp, nv_slotp);
4138 
4139 	/*
4140 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4141 	 * is OK so far, so return.  Otherwise, fall into error handling
4142 	 * below.
4143 	 */
4144 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4145 
4146 		return (SATA_TRAN_ACCEPTED);
4147 	}
4148 
4149 	error:
4150 	/*
4151 	 * there was an error so reset the device and complete the packet.
4152 	 */
4153 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4154 	nv_complete_io(nvp, spkt, 0);
4155 	nv_reset(nvp);
4156 
4157 	return (SATA_TRAN_PORT_ERROR);
4158 }
4159 
4160 
4161 /*
 * start an ATAPI PACKET command (PIO data in or out)
4163  */
4164 static int
4165 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4166 {
4167 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4168 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4169 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4170 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4171 
4172 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4173 	    "nv_start_pkt_pio: start"));
4174 
4175 	/*
4176 	 * Write the PACKET command to the command register.  Normally
4177 	 * this would be done through nv_program_taskfile_regs().  It
4178 	 * is done here because some values need to be overridden.
4179 	 */
4180 
4181 	/* select the drive */
4182 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4183 
	/* make certain the drive is selected */
4185 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4186 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4187 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4188 		    "nv_start_pkt_pio: drive select failed"));
4189 		return (SATA_TRAN_PORT_ERROR);
4190 	}
4191 
4192 	/*
	 * The command is always sent via PIO, regardless of what the SATA
	 * framework sets in the command.  Overwrite the DMA bit to do this.
4195 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4196 	 */
4197 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4198 
4199 	/* set appropriately by the sata framework */
4200 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4201 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4202 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4203 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4204 
4205 	/* initiate the command by writing the command register last */
4206 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4207 
4208 	/* Give the host controller time to do its thing */
4209 	NV_DELAY_NSEC(400);
4210 
4211 	/*
4212 	 * Wait for the device to indicate that it is ready for the command
4213 	 * ATAPI protocol state - HP0: Check_Status_A
4214 	 */
4215 
4216 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4217 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4218 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4219 	    4000000, 0) == B_FALSE) {
4220 		/*
4221 		 * Either an error or device fault occurred or the wait
4222 		 * timed out.  According to the ATAPI protocol, command
4223 		 * completion is also possible.  Other implementations of
4224 		 * this protocol don't handle this last case, so neither
4225 		 * does this code.
4226 		 */
4227 
4228 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4229 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4230 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4231 
4232 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4233 			    "nv_start_pkt_pio: device error (HP0)"));
4234 		} else {
4235 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4236 
4237 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4238 			    "nv_start_pkt_pio: timeout (HP0)"));
4239 		}
4240 
4241 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4242 		nv_complete_io(nvp, spkt, 0);
4243 		nv_reset(nvp);
4244 
4245 		return (SATA_TRAN_PORT_ERROR);
4246 	}
4247 
4248 	/*
4249 	 * Put the ATAPI command in the data register
4250 	 * ATAPI protocol state - HP1: Send_Packet
4251 	 */
4252 
4253 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4254 	    (ushort_t *)nvp->nvp_data,
4255 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4256 
4257 	/*
4258 	 * See you in nv_intr_pkt_pio.
4259 	 * ATAPI protocol state - HP3: INTRQ_wait
4260 	 */
4261 
4262 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4263 	    "nv_start_pkt_pio: exiting into HP3"));
4264 
4265 	return (SATA_TRAN_ACCEPTED);
4266 }
4267 
4268 
4269 /*
4270  * Interrupt processing for a non-data ATA command.
4271  */
4272 static void
4273 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4274 {
4275 	uchar_t status;
4276 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4277 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4278 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4279 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4280 
4281 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4282 
4283 	status = nv_get8(cmdhdl, nvp->nvp_status);
4284 
4285 	/*
4286 	 * check for errors
4287 	 */
4288 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4289 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4290 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4291 		    nvp->nvp_altstatus);
4292 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4293 	} else {
4294 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4295 	}
4296 
4297 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4298 }
4299 
4300 
4301 /*
4302  * ATA command, PIO data in
4303  */
4304 static void
4305 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4306 {
4307 	uchar_t	status;
4308 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4309 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4310 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4311 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4312 	int count;
4313 
4314 	status = nv_get8(cmdhdl, nvp->nvp_status);
4315 
4316 	if (status & SATA_STATUS_BSY) {
4317 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4318 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4319 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4320 		    nvp->nvp_altstatus);
4321 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4322 		nv_reset(nvp);
4323 
4324 		return;
4325 	}
4326 
4327 	/*
4328 	 * check for errors
4329 	 */
4330 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4331 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4332 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4333 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4334 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4335 
4336 		return;
4337 	}
4338 
4339 	/*
4340 	 * read the next chunk of data (if any)
4341 	 */
4342 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4343 
4344 	/*
4345 	 * read count bytes
4346 	 */
4347 	ASSERT(count != 0);
4348 
4349 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4350 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4351 
4352 	nv_slotp->nvslot_v_addr += count;
4353 	nv_slotp->nvslot_byte_count -= count;
4354 
4355 
4356 	if (nv_slotp->nvslot_byte_count != 0) {
4357 		/*
4358 		 * more to transfer.  Wait for next interrupt.
4359 		 */
4360 		return;
4361 	}
4362 
4363 	/*
4364 	 * transfer is complete. wait for the busy bit to settle.
4365 	 */
4366 	NV_DELAY_NSEC(400);
4367 
4368 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4369 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4370 }
4371 
4372 
4373 /*
4374  * ATA command PIO data out
4375  */
4376 static void
4377 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4378 {
4379 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4380 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4381 	uchar_t status;
4382 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4383 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4384 	int count;
4385 
4386 	/*
4387 	 * clear the IRQ
4388 	 */
4389 	status = nv_get8(cmdhdl, nvp->nvp_status);
4390 
4391 	if (status & SATA_STATUS_BSY) {
4392 		/*
4393 		 * this should not happen
4394 		 */
4395 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4396 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4397 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4398 		    nvp->nvp_altstatus);
4399 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4400 
4401 		return;
4402 	}
4403 
4404 	/*
4405 	 * check for errors
4406 	 */
4407 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4408 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4409 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4410 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4411 
4412 		return;
4413 	}
4414 
4415 	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer.  The transfer most likely
	 * completed successfully, but verify that byte_count is
	 * zero.
4420 	 */
4421 	if ((status & SATA_STATUS_DRQ) == 0) {
4422 
4423 		if (nv_slotp->nvslot_byte_count == 0) {
4424 			/*
4425 			 * complete; successful transfer
4426 			 */
4427 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4428 		} else {
4429 			/*
4430 			 * error condition, incomplete transfer
4431 			 */
4432 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4433 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4434 		}
4435 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4436 
4437 		return;
4438 	}
4439 
4440 	/*
4441 	 * write the next chunk of data
4442 	 */
4443 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4444 
4445 	/*
	 * write count bytes
4447 	 */
4448 
4449 	ASSERT(count != 0);
4450 
4451 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4452 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4453 
4454 	nv_slotp->nvslot_v_addr += count;
4455 	nv_slotp->nvslot_byte_count -= count;
4456 }
4457 
4458 
4459 /*
4460  * ATAPI PACKET command, PIO in/out interrupt
4461  *
4462  * Under normal circumstances, one of four different interrupt scenarios
4463  * will result in this function being called:
4464  *
4465  * 1. Packet command data transfer
4466  * 2. Packet command completion
4467  * 3. Request sense data transfer
4468  * 4. Request sense command completion
4469  */
4470 static void
4471 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4472 {
4473 	uchar_t	status;
4474 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4475 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4476 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4477 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4478 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4479 	uint16_t ctlr_count;
4480 	int count;
4481 
4482 	/* ATAPI protocol state - HP2: Check_Status_B */
4483 
4484 	status = nv_get8(cmdhdl, nvp->nvp_status);
4485 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4486 	    "nv_intr_pkt_pio: status 0x%x", status));
4487 
4488 	if (status & SATA_STATUS_BSY) {
4489 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4490 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4491 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4492 		} else {
4493 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4494 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4495 
4496 			nv_reset(nvp);
4497 		}
4498 
4499 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4500 		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4501 
4502 		return;
4503 	}
4504 
4505 	if ((status & SATA_STATUS_DF) != 0) {
4506 		/*
4507 		 * On device fault, just clean up and bail.  Request sense
4508 		 * will just default to its NO SENSE initialized value.
4509 		 */
4510 
4511 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4512 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4513 		}
4514 
4515 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4516 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4517 
4518 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4519 		    nvp->nvp_altstatus);
4520 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4521 		    nvp->nvp_error);
4522 
4523 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4524 		    "nv_intr_pkt_pio: device fault"));
4525 
4526 		return;
4527 	}
4528 
4529 	if ((status & SATA_STATUS_ERR) != 0) {
4530 		/*
4531 		 * On command error, figure out whether we are processing a
4532 		 * request sense.  If so, clean up and bail.  Otherwise,
4533 		 * do a REQUEST SENSE.
4534 		 */
4535 
4536 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4537 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4538 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4539 			    NV_FAILURE) {
4540 				nv_copy_registers(nvp, &spkt->satapkt_device,
4541 				    spkt);
4542 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4543 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4544 			}
4545 
4546 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4547 			    nvp->nvp_altstatus);
4548 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4549 			    nvp->nvp_error);
4550 		} else {
4551 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4552 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4553 
4554 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4555 		}
4556 
4557 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4558 		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4559 
4560 		return;
4561 	}
4562 
4563 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4564 		/*
4565 		 * REQUEST SENSE command processing
4566 		 */
4567 
4568 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4569 			/* ATAPI state - HP4: Transfer_Data */
4570 
4571 			/* read the byte count from the controller */
4572 			ctlr_count =
4573 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4574 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4575 
4576 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4577 			    "nv_intr_pkt_pio: ctlr byte count - %d",
4578 			    ctlr_count));
4579 
4580 			if (ctlr_count == 0) {
4581 				/* no data to transfer - some devices do this */
4582 
4583 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4584 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4585 
4586 				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4587 				    "nv_intr_pkt_pio: done (no data)"));
4588 
4589 				return;
4590 			}
4591 
4592 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4593 
4594 			/* transfer the data */
4595 			ddi_rep_get16(cmdhdl,
4596 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4597 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4598 			    DDI_DEV_NO_AUTOINCR);
4599 
4600 			/* consume residual bytes */
4601 			ctlr_count -= count;
4602 
4603 			if (ctlr_count > 0) {
4604 				for (; ctlr_count > 0; ctlr_count -= 2)
4605 					(void) ddi_get16(cmdhdl,
4606 					    (ushort_t *)nvp->nvp_data);
4607 			}
4608 
4609 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4610 			    "nv_intr_pkt_pio: transition to HP2"));
4611 		} else {
4612 			/* still in ATAPI state - HP2 */
4613 
4614 			/*
4615 			 * In order to avoid clobbering the rqsense data
4616 			 * set by the SATA framework, the sense data read
4617 			 * from the device is put in a separate buffer and
4618 			 * copied into the packet after the request sense
4619 			 * command successfully completes.
4620 			 */
4621 			bcopy(nv_slotp->nvslot_rqsense_buff,
4622 			    spkt->satapkt_cmd.satacmd_rqsense,
4623 			    SATA_ATAPI_RQSENSE_LEN);
4624 
4625 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4626 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4627 
4628 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4629 			    "nv_intr_pkt_pio: request sense done"));
4630 		}
4631 
4632 		return;
4633 	}
4634 
4635 	/*
4636 	 * Normal command processing
4637 	 */
4638 
4639 	if ((status & (SATA_STATUS_DRQ)) != 0) {
4640 		/* ATAPI protocol state - HP4: Transfer_Data */
4641 
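		/*
		 * Per the ATAPI PACKET protocol, the device reports the
		 * number of bytes it will transfer for this DRQ phase in
		 * the cylinder high/low (LBA mid/high) registers.
		 */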
4642 		/* read the byte count from the controller */
4643 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4644 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4645 
4646 		if (ctlr_count == 0) {
4647 			/* no data to transfer - some devices do this */
4648 
4649 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4650 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4651 
4652 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4653 			    "nv_intr_pkt_pio: done (no data)"));
4654 
4655 			return;
4656 		}
4657 
4658 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4659 
4660 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4661 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4662 
4663 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4664 		    "nv_intr_pkt_pio: byte_count 0x%x",
4665 		    nv_slotp->nvslot_byte_count));
4666 
4667 		/* transfer the data */
4668 
4669 		if (direction == SATA_DIR_READ) {
4670 			ddi_rep_get16(cmdhdl,
4671 			    (ushort_t *)nv_slotp->nvslot_v_addr,
4672 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4673 			    DDI_DEV_NO_AUTOINCR);
4674 
4675 			ctlr_count -= count;
4676 
4677 			if (ctlr_count > 0) {
				/* consume remaining bytes */
4679 
4680 				for (; ctlr_count > 0;
4681 				    ctlr_count -= 2)
4682 					(void) ddi_get16(cmdhdl,
4683 					    (ushort_t *)nvp->nvp_data);
4684 
4685 				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4686 				    "nv_intr_pkt_pio: bytes remained"));
4687 			}
4688 		} else {
4689 			ddi_rep_put16(cmdhdl,
4690 			    (ushort_t *)nv_slotp->nvslot_v_addr,
4691 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4692 			    DDI_DEV_NO_AUTOINCR);
4693 		}
4694 
4695 		nv_slotp->nvslot_v_addr += count;
4696 		nv_slotp->nvslot_byte_count -= count;
4697 
4698 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4699 		    "nv_intr_pkt_pio: transition to HP2"));
4700 	} else {
4701 		/* still in ATAPI state - HP2 */
4702 
4703 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4704 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4705 
4706 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4707 		    "nv_intr_pkt_pio: done"));
4708 	}
4709 }
4710 
4711 
4712 /*
4713  * ATA command, DMA data in/out
4714  */
4715 static void
4716 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4717 {
4718 	uchar_t status;
4719 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4720 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4721 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4722 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4723 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4724 	uchar_t	bmicx;
4725 	uchar_t bm_status;
4726 
4727 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4728 
4729 	/*
4730 	 * stop DMA engine.
4731 	 */
4732 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4733 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4734 
4735 	/*
4736 	 * get the status and clear the IRQ, and check for DMA error
4737 	 */
4738 	status = nv_get8(cmdhdl, nvp->nvp_status);
4739 
4740 	/*
4741 	 * check for drive errors
4742 	 */
4743 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4744 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4745 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4746 		(void) nv_bm_status_clear(nvp);
4747 
4748 		return;
4749 	}
4750 
4751 	bm_status = nv_bm_status_clear(nvp);
4752 
4753 	/*
4754 	 * check for bus master errors
4755 	 */
4756 	if (bm_status & BMISX_IDERR) {
4757 		spkt->satapkt_reason = SATA_PKT_RESET;
4758 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4759 		    nvp->nvp_altstatus);
4760 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4761 		nv_reset(nvp);
4762 
4763 		return;
4764 	}
4765 
4766 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4767 }
4768 
4769 
4770 /*
4771  * Wait for a register of a controller to achieve a specific state.
4772  * To return normally, all the bits in the first sub-mask must be ON,
4773  * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return B_FALSE; otherwise return B_TRUE.
4776  *
4777  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4778  * occur for the first 250 us, then switch over to a sleeping wait.
4779  *
4780  */
4781 int
4782 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4783     int type_wait)
4784 {
4785 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4786 	hrtime_t end, cur, start_sleep, start;
4787 	int first_time = B_TRUE;
4788 	ushort_t val;
4789 
4790 	for (;;) {
4791 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4792 
4793 		if ((val & onbits) == onbits && (val & offbits) == 0) {
4794 
4795 			return (B_TRUE);
4796 		}
4797 
4798 		cur = gethrtime();
4799 
4800 		/*
4801 		 * store the start time and calculate the end
4802 		 * time.  also calculate "start_sleep" which is
4803 		 * the point after which the driver will stop busy
4804 		 * waiting and change to sleep waiting.
4805 		 */
4806 		if (first_time) {
4807 			first_time = B_FALSE;
4808 			/*
4809 			 * start and end are in nanoseconds
4810 			 */
4811 			start = cur;
4812 			end = start + timeout_usec * 1000;
4813 			/*
			 * add 250 us to start
4815 			 */
4816 			start_sleep =  start + 250000;
4817 
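			/*
			 * sleeping (delay()) is not allowed in interrupt
			 * context, so force busy waiting there
			 */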
4818 			if (servicing_interrupt()) {
4819 				type_wait = NV_NOSLEEP;
4820 			}
4821 		}
4822 
4823 		if (cur > end) {
4824 
4825 			break;
4826 		}
4827 
4828 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4829 #if ! defined(__lock_lint)
4830 			delay(1);
4831 #endif
4832 		} else {
4833 			drv_usecwait(nv_usec_delay);
4834 		}
4835 	}
4836 
4837 	return (B_FALSE);
4838 }
4839 
4840 
4841 /*
 * This is a slightly more complicated version of nv_wait() that checks
 * for error conditions and bails out rather than looping
4844  * until the timeout is exceeded.
4845  *
4846  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4847  * occur for the first 250 us, then switch over to a sleeping wait.
4848  */
4849 int
4850 nv_wait3(
4851 	nv_port_t	*nvp,
4852 	uchar_t		onbits1,
4853 	uchar_t		offbits1,
4854 	uchar_t		failure_onbits2,
4855 	uchar_t		failure_offbits2,
4856 	uchar_t		failure_onbits3,
4857 	uchar_t		failure_offbits3,
4858 	uint_t		timeout_usec,
4859 	int		type_wait)
4860 {
4861 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4862 	hrtime_t end, cur, start_sleep, start;
4863 	int first_time = B_TRUE;
4864 	ushort_t val;
4865 
4866 	for (;;) {
4867 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4868 
4869 		/*
4870 		 * check for expected condition
4871 		 */
4872 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4873 
4874 			return (B_TRUE);
4875 		}
4876 
4877 		/*
4878 		 * check for error conditions
4879 		 */
4880 		if ((val & failure_onbits2) == failure_onbits2 &&
4881 		    (val & failure_offbits2) == 0) {
4882 
4883 			return (B_FALSE);
4884 		}
4885 
4886 		if ((val & failure_onbits3) == failure_onbits3 &&
4887 		    (val & failure_offbits3) == 0) {
4888 
4889 			return (B_FALSE);
4890 		}
4891 
4892 		/*
4893 		 * store the start time and calculate the end
4894 		 * time.  also calculate "start_sleep" which is
4895 		 * the point after which the driver will stop busy
4896 		 * waiting and change to sleep waiting.
4897 		 */
4898 		if (first_time) {
4899 			first_time = B_FALSE;
4900 			/*
4901 			 * start and end are in nanoseconds
4902 			 */
4903 			cur = start = gethrtime();
4904 			end = start + timeout_usec * 1000;
4905 			/*
			 * add 250 us to start
4907 			 */
4908 			start_sleep =  start + 250000;
4909 
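			/*
			 * sleeping (delay()) is not allowed in interrupt
			 * context, so force busy waiting there
			 */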
4910 			if (servicing_interrupt()) {
4911 				type_wait = NV_NOSLEEP;
4912 			}
4913 		} else {
4914 			cur = gethrtime();
4915 		}
4916 
4917 		if (cur > end) {
4918 
4919 			break;
4920 		}
4921 
4922 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4923 #if ! defined(__lock_lint)
4924 			delay(1);
4925 #endif
4926 		} else {
4927 			drv_usecwait(nv_usec_delay);
4928 		}
4929 	}
4930 
4931 	return (B_FALSE);
4932 }
4933 
4934 
4935 /*
 * nv_check_link() checks whether the specified link is active, i.e.
 * whether a device is present and communicating.
4938  */
4939 static boolean_t
4940 nv_check_link(uint32_t sstatus)
4941 {
4942 	uint8_t det;
4943 
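	/*
	 * DET is the device detection field of SStatus; the value
	 * SSTATUS_DET_DEVPRE_PHYCOM indicates that a device is present
	 * and PHY communication is established.
	 */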
4944 	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4945 
4946 	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4947 }
4948 
4949 
4950 /*
4951  * nv_port_state_change() reports the state of the port to the
4952  * sata module by calling sata_hba_event_notify().  This
 * function is called any time the state of the port is changed.
4954  */
4955 static void
4956 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4957 {
4958 	sata_device_t sd;
4959 
4960 	bzero((void *)&sd, sizeof (sata_device_t));
4961 	sd.satadev_rev = SATA_DEVICE_REV;
4962 	nv_copy_registers(nvp, &sd, NULL);
4963 
4964 	/*
	 * When NCQ is implemented, the sactive and snotific fields will need
	 * to be updated.
4967 	 */
4968 	sd.satadev_addr.cport = nvp->nvp_port_num;
4969 	sd.satadev_addr.qual = addr_type;
4970 	sd.satadev_state = state;
4971 
4972 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4973 }
4974 
4975 
4976 /*
4977  * timeout processing:
4978  *
4979  * Check if any packets have crossed a timeout threshold.  If so, then
4980  * abort the packet.  This function is not NCQ aware.
4981  *
 * If reset was invoked anywhere other than nv_sata_probe(), then
4983  * monitor for reset completion here.
4984  *
4985  */
4986 static void
4987 nv_timeout(void *arg)
4988 {
4989 	nv_port_t *nvp = arg;
4990 	nv_slot_t *nv_slotp;
4991 	int restart_timeout = B_FALSE;
4992 
4993 	mutex_enter(&nvp->nvp_mutex);
4994 
4995 	/*
4996 	 * If the probe entry point is driving the reset and signature
4997 	 * acquisition, just return.
4998 	 */
4999 	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
5000 		goto finished;
5001 	}
5002 
5003 	/*
5004 	 * If the port is not in the init state, it likely
5005 	 * means the link was lost while a timeout was active.
5006 	 */
5007 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5008 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5009 		    "nv_timeout: port uninitialized"));
5010 
5011 		goto finished;
5012 	}
5013 
5014 	if (nvp->nvp_state & NV_PORT_RESET) {
5015 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5016 		uint32_t sstatus;
5017 
5018 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5019 		    "nv_timeout(): port waiting for signature"));
5020 
5021 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5022 
5023 		/*
5024 		 * check for link presence.  If the link remains
5025 		 * missing for more than 2 seconds, send a remove
5026 		 * event and abort signature acquisition.
5027 		 */
5028 		if (nv_check_link(sstatus) == B_FALSE) {
5029 			clock_t e_link_lost = ddi_get_lbolt();
5030 
5031 			if (nvp->nvp_link_lost_time == 0) {
5032 				nvp->nvp_link_lost_time = e_link_lost;
5033 			}
5034 			if (TICK_TO_SEC(e_link_lost -
5035 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
5036 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5037 				    "probe: intermittent link lost while"
5038 				    " resetting"));
5039 				restart_timeout = B_TRUE;
5040 			} else {
5041 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5042 				    "link lost during signature acquisition."
5043 				    "  Giving up"));
5044 				nv_port_state_change(nvp,
5045 				    SATA_EVNT_DEVICE_DETACHED|
5046 				    SATA_EVNT_LINK_LOST,
5047 				    SATA_ADDR_CPORT, 0);
5048 				nvp->nvp_state |= NV_PORT_HOTREMOVED;
5049 				nvp->nvp_state &= ~NV_PORT_RESET;
5050 			}
5051 
5052 			goto finished;
5053 		} else {
5054 
5055 			nvp->nvp_link_lost_time = 0;
5056 		}
5057 
5058 		nv_read_signature(nvp);
5059 
5060 		if (nvp->nvp_signature != 0) {
5061 			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
5062 			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
5063 				nvp->nvp_state |= NV_PORT_RESTORE;
5064 				nv_port_state_change(nvp,
5065 				    SATA_EVNT_DEVICE_RESET,
5066 				    SATA_ADDR_DCPORT,
5067 				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
5068 			}
5069 
5070 			goto finished;
5071 		}
5072 
5073 		/*
		 * Reset if more than 5 seconds have passed without
5075 		 * acquiring a signature.
5076 		 */
5077 		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
5078 			nv_reset(nvp);
5079 		}
5080 
5081 		restart_timeout = B_TRUE;
5082 		goto finished;
5083 	}
5084 
5085 
5086 	/*
5087 	 * not yet NCQ aware
5088 	 */
5089 	nv_slotp = &(nvp->nvp_slot[0]);
5090 
5091 	/*
	 * this can happen early on, before the slot array is set up,
	 * or when a device was unexpectedly removed while there was
	 * an active packet.
5095 	 */
5096 	if (nv_slotp == NULL) {
5097 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5098 		    "nv_timeout: nv_slotp == NULL"));
5099 
5100 		goto finished;
5101 	}
5102 
5103 	/*
5104 	 * perform timeout checking and processing only if there is an
5105 	 * active packet on the port
5106 	 */
5107 	if (nv_slotp->nvslot_spkt != NULL)  {
5108 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5109 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5110 		uint8_t cmd = satacmd->satacmd_cmd_reg;
5111 		uint64_t lba;
5112 
5113 #if ! defined(__lock_lint) && defined(DEBUG)
5114 
5115 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5116 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5117 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5118 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5119 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5120 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5121 #endif
5122 
5123 		/*
5124 		 * timeout not needed if there is a polling thread
5125 		 */
5126 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5127 
5128 			goto finished;
5129 		}
5130 
5131 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5132 		    spkt->satapkt_time) {
5133 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5134 			    "abort timeout: "
5135 			    "nvslot_stime: %ld max ticks till timeout: "
5136 			    "%ld cur_time: %ld cmd=%x lba=%d",
5137 			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
5138 			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
5139 
5140 			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
5141 
5142 		} else {
5143 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
5144 			    " still in use so restarting timeout"));
5145 		}
5146 		restart_timeout = B_TRUE;
5147 
5148 	} else {
5149 		/*
5150 		 * there was no active packet, so do not re-enable timeout
5151 		 */
5152 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5153 		    "nv_timeout: no active packet so not re-arming timeout"));
5154 	}
5155 
5156 	finished:
5157 
5158 	if (restart_timeout == B_TRUE) {
5159 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
5160 		    drv_usectohz(NV_ONE_SEC));
5161 	} else {
5162 		nvp->nvp_timeout_id = 0;
5163 	}
5164 	mutex_exit(&nvp->nvp_mutex);
5165 }
5166 
5167 
5168 /*
5169  * enable or disable the 3 interrupt types the driver is
5170  * interested in: completion, add and remove.
5171  */
5172 static void
5173 mcp04_set_intr(nv_port_t *nvp, int flag)
5174 {
5175 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5176 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5177 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
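	/*
	 * These tables are indexed by port number: entry 0 holds the
	 * primary-device (PDEV) bits and entry 1 the secondary-device
	 * (SDEV) bits of the shared interrupt registers.
	 */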
5178 	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
5179 	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
5180 	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
5181 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5182 
5183 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5184 
5185 	/*
	 * The controller-level lock is also required since the 8-bit
	 * interrupt register is shared between both channels.
5188 	 */
5189 	mutex_enter(&nvc->nvc_mutex);
5190 
5191 	if (flag & NV_INTR_CLEAR_ALL) {
5192 		NVLOG((NVDBG_INTR, nvc, nvp,
5193 		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
5194 
5195 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5196 		    (uint8_t *)(nvc->nvc_mcp04_int_status));
5197 
5198 		if (intr_status & clear_all_bits[port]) {
5199 
5200 			nv_put8(nvc->nvc_bar_hdl[5],
5201 			    (uint8_t *)(nvc->nvc_mcp04_int_status),
5202 			    clear_all_bits[port]);
5203 
5204 			NVLOG((NVDBG_INTR, nvc, nvp,
5205 			    "interrupt bits cleared %x",
5206 			    intr_status & clear_all_bits[port]));
5207 		}
5208 	}
5209 
5210 	if (flag & NV_INTR_DISABLE) {
5211 		NVLOG((NVDBG_INTR, nvc, nvp,
5212 		    "mcp04_set_intr: NV_INTR_DISABLE"));
5213 		int_en = nv_get8(bar5_hdl,
5214 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5215 		int_en &= ~intr_bits[port];
5216 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5217 		    int_en);
5218 	}
5219 
5220 	if (flag & NV_INTR_ENABLE) {
5221 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
5222 		int_en = nv_get8(bar5_hdl,
5223 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5224 		int_en |= intr_bits[port];
5225 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5226 		    int_en);
5227 	}
5228 
5229 	mutex_exit(&nvc->nvc_mutex);
5230 }
5231 
5232 
5233 /*
5234  * enable or disable the 3 interrupts the driver is interested in:
5235  * completion interrupt, hot add, and hot remove interrupt.
5236  */
5237 static void
5238 mcp55_set_intr(nv_port_t *nvp, int flag)
5239 {
5240 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5241 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5242 	uint16_t intr_bits =
5243 	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
5244 	uint16_t int_en;
5245 
5246 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5247 
5248 	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5249 
5250 	if (flag & NV_INTR_CLEAR_ALL) {
5251 		NVLOG((NVDBG_INTR, nvc, nvp,
5252 		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
5253 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
5254 	}
5255 
5256 	if (flag & NV_INTR_ENABLE) {
5257 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
5258 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5259 		int_en |= intr_bits;
5260 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5261 	}
5262 
5263 	if (flag & NV_INTR_DISABLE) {
5264 		NVLOG((NVDBG_INTR, nvc, nvp,
5265 		    "mcp55_set_intr: NV_INTR_DISABLE"));
5266 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5267 		int_en &= ~intr_bits;
5268 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5269 	}
5270 }
5271 
5272 
5273 /*
5274  * The PM functions for suspend and resume are incomplete and need additional
 * work.  They may or may not work in their current state.
5276  */
5277 static void
5278 nv_resume(nv_port_t *nvp)
5279 {
5280 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5281 
5282 	mutex_enter(&nvp->nvp_mutex);
5283 
5284 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5285 		mutex_exit(&nvp->nvp_mutex);
5286 
5287 		return;
5288 	}
5289 
5290 #ifdef SGPIO_SUPPORT
5291 	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5292 	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5293 #endif
5294 
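	/*
	 * re-enable interrupts via the chipset-specific set_intr routine
	 * (mcp04_set_intr or mcp55_set_intr)
	 */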
5295 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5296 
5297 	/*
	 * power may have been removed from the port and the
	 * drive, and/or a drive may have been added or removed.
	 * Force a reset, which will cause a probe and re-establish
	 * any state needed on the drive.
5303 	 */
5304 
5305 	nv_reset(nvp);
5306 
5307 	mutex_exit(&nvp->nvp_mutex);
5308 }
5309 
5310 /*
5311  * The PM functions for suspend and resume are incomplete and need additional
 * work.  They may or may not work in their current state.
5313  */
5314 static void
5315 nv_suspend(nv_port_t *nvp)
5316 {
5317 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5318 
5319 	mutex_enter(&nvp->nvp_mutex);
5320 
5321 #ifdef SGPIO_SUPPORT
5322 	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5323 	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5324 #endif
5325 
5326 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5327 		mutex_exit(&nvp->nvp_mutex);
5328 
5329 		return;
5330 	}
5331 
5332 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
5333 
5334 	/*
	 * power may have been removed from the port and the
5336 	 * drive, and/or a drive may have been added or removed.
5337 	 * Force a reset which will cause a probe and re-establish
5338 	 * any state needed on the drive.
5339 	 * nv_reset(nvp);
5340 	 */
5341 
5342 	mutex_exit(&nvp->nvp_mutex);
5343 }
5344 
5345 
5346 static void
5347 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5348 {
5349 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5350 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5351 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5352 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5353 	uchar_t status;
5354 	struct sata_cmd_flags flags;
5355 
5356 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5357 
5358 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5359 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5360 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5361 
5362 	if (spkt == NULL) {
5363 
5364 		return;
5365 	}
5366 
5367 	/*
5368 	 * in the error case, implicitly set the return of regs needed
5369 	 * for error handling.
5370 	 */
5371 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5372 	    nvp->nvp_altstatus);
5373 
5374 	flags = scmd->satacmd_flags;
5375 
5376 	if (status & SATA_STATUS_ERR) {
5377 		flags.sata_copy_out_lba_low_msb = B_TRUE;
5378 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5379 		flags.sata_copy_out_lba_high_msb = B_TRUE;
5380 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5381 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5382 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5383 		flags.sata_copy_out_error_reg = B_TRUE;
5384 		flags.sata_copy_out_sec_count_msb = B_TRUE;
5385 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5386 		scmd->satacmd_status_reg = status;
5387 	}
5388 
5389 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5390 
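		/*
		 * For 48-bit commands each shadow taskfile register holds
		 * two bytes; setting HOB (high order byte) in the device
		 * control register causes reads to return the previously
		 * written high-order byte.
		 */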
5391 		/*
5392 		 * set HOB so that high byte will be read
5393 		 */
5394 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5395 
5396 		/*
5397 		 * get the requested high bytes
5398 		 */
5399 		if (flags.sata_copy_out_sec_count_msb) {
5400 			scmd->satacmd_sec_count_msb =
5401 			    nv_get8(cmdhdl, nvp->nvp_count);
5402 		}
5403 
5404 		if (flags.sata_copy_out_lba_low_msb) {
5405 			scmd->satacmd_lba_low_msb =
5406 			    nv_get8(cmdhdl, nvp->nvp_sect);
5407 		}
5408 
5409 		if (flags.sata_copy_out_lba_mid_msb) {
5410 			scmd->satacmd_lba_mid_msb =
5411 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5412 		}
5413 
5414 		if (flags.sata_copy_out_lba_high_msb) {
5415 			scmd->satacmd_lba_high_msb =
5416 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5417 		}
5418 	}
5419 
5420 	/*
5421 	 * disable HOB so that low byte is read
5422 	 */
5423 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5424 
5425 	/*
5426 	 * get the requested low bytes
5427 	 */
5428 	if (flags.sata_copy_out_sec_count_lsb) {
5429 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5430 	}
5431 
5432 	if (flags.sata_copy_out_lba_low_lsb) {
5433 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5434 	}
5435 
5436 	if (flags.sata_copy_out_lba_mid_lsb) {
5437 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5438 	}
5439 
5440 	if (flags.sata_copy_out_lba_high_lsb) {
5441 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5442 	}
5443 
5444 	/*
5445 	 * get the device register if requested
5446 	 */
5447 	if (flags.sata_copy_out_device_reg) {
5448 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5449 	}
5450 
5451 	/*
5452 	 * get the error register if requested
5453 	 */
5454 	if (flags.sata_copy_out_error_reg) {
5455 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5456 	}
5457 }
5458 
5459 
5460 /*
5461  * Hot plug and remove interrupts can occur when the device is reset.  Just
5462  * masking the interrupt doesn't always work well because if a
5463  * different interrupt arrives on the other port, the driver can still
5464  * end up checking the state of the other port and discover the hot
 * interrupt flag is set even though it was masked.  Checking for recent
 * reset activity and then ignoring the interrupt turns out to be the
 * easiest approach.
5467  */
5468 static void
5469 nv_report_add_remove(nv_port_t *nvp, int flags)
5470 {
5471 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5472 	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5473 	uint32_t sstatus;
5474 	int i;
5475 
5476 	/*
	 * If the port was reset within the last second, ignore the
	 * interrupt.  This somewhat heavy-handed clamping should be
	 * reworked and improved.
5480 	 */
5481 	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
		    " ignoring plug interrupt; reset was %dms ago",
5484 		    TICK_TO_MSEC(time_diff)));
5485 
5486 		return;
5487 	}
5488 
5489 	/*
5490 	 * wait up to 1ms for sstatus to settle and reflect the true
5491 	 * status of the port.  Failure to do so can create confusion
5492 	 * in probe, where the incorrect sstatus value can still
5493 	 * persist.
5494 	 */
5495 	for (i = 0; i < 1000; i++) {
5496 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5497 
5498 		if ((flags == NV_PORT_HOTREMOVED) &&
5499 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5500 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5501 			break;
5502 		}
5503 
5504 		if ((flags != NV_PORT_HOTREMOVED) &&
5505 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5506 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5507 			break;
5508 		}
5509 		drv_usecwait(1);
5510 	}
5511 
5512 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5513 	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5514 
5515 	if (flags == NV_PORT_HOTREMOVED) {
5516 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5517 		    "nv_report_add_remove() hot removed"));
5518 		nv_port_state_change(nvp,
5519 		    SATA_EVNT_DEVICE_DETACHED,
5520 		    SATA_ADDR_CPORT, 0);
5521 
5522 		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5523 	} else {
5524 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5525 		    "nv_report_add_remove() hot plugged"));
5526 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5527 		    SATA_ADDR_CPORT, 0);
5528 	}
5529 }
5530 
5531 
5532 /*
 * Get request sense data and stuff it into the command's sense buffer.
5534  * Start a request sense command in order to get sense data to insert
5535  * in the sata packet's rqsense buffer.  The command completion
5536  * processing is in nv_intr_pkt_pio.
5537  *
5538  * The sata framework provides a function to allocate and set-up a
 * request sense packet command.  The reasons it is not being used here are:
5540  * a) it cannot be called in an interrupt context and this function is
5541  *    called in an interrupt context.
5542  * b) it allocates DMA resources that are not used here because this is
5543  *    implemented using PIO.
5544  *
5545  * If, in the future, this is changed to use DMA, the sata framework should
5546  * be used to allocate and set-up the error retrieval (request sense)
5547  * command.
5548  */
5549 static int
5550 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5551 {
5552 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5553 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5554 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5555 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5556 
5557 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5558 	    "nv_start_rqsense_pio: start"));
5559 
5560 	/* clear the local request sense buffer before starting the command */
5561 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5562 
5563 	/* Write the request sense PACKET command */
5564 
5565 	/* select the drive */
5566 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5567 
	/* make certain the drive is selected */
5569 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5570 	    NV_SEC2USEC(5), 0) == B_FALSE) {
5571 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5572 		    "nv_start_rqsense_pio: drive select failed"));
5573 		return (NV_FAILURE);
5574 	}
5575 
5576 	/* set up the command */
5577 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
5578 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5579 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5580 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5581 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5582 
5583 	/* initiate the command by writing the command register last */
5584 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5585 
5586 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5587 	NV_DELAY_NSEC(400);
5588 
5589 	/*
5590 	 * Wait for the device to indicate that it is ready for the command
5591 	 * ATAPI protocol state - HP0: Check_Status_A
5592 	 */
5593 
5594 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5595 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5596 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5597 	    4000000, 0) == B_FALSE) {
5598 		if (nv_get8(cmdhdl, nvp->nvp_status) &
5599 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5600 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5601 			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5602 		} else {
5603 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5604 			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5605 		}
5606 
5607 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5608 		nv_complete_io(nvp, spkt, 0);
5609 		nv_reset(nvp);
5610 
5611 		return (NV_FAILURE);
5612 	}
5613 
5614 	/*
5615 	 * Put the ATAPI command in the data register
5616 	 * ATAPI protocol state - HP1: Send_Packet
5617 	 */
5618 
5619 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5620 	    (ushort_t *)nvp->nvp_data,
5621 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5622 
5623 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5624 	    "nv_start_rqsense_pio: exiting into HP3"));
5625 
5626 	return (NV_SUCCESS);
5627 }
5628 
5629 
5630 #ifdef SGPIO_SUPPORT
5631 /*
5632  * NVIDIA specific SGPIO LED support
5633  * Please refer to the NVIDIA documentation for additional details
5634  */
5635 
5636 /*
5637  * nv_sgp_led_init
5638  * Detect SGPIO support.  If present, initialize.
5639  */
5640 static void
5641 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5642 {
5643 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5644 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5645 	nv_sgp_cmn_t *cmn;	/* shared data structure */
5646 	char tqname[SGPIO_TQ_NAME_LEN];
5647 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5648 
5649 	/*
5650 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5651 	 * However, the current implementation only supports 4 drives.
	 * With two drives per controller, that means only the first two
	 * controllers need to be examined.
5654 	 */
5655 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5656 		return;
5657 
5658 	/* confirm that the SGPIO registers are there */
5659 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5660 		NVLOG((NVDBG_INIT, nvc, NULL,
5661 		    "SGPIO registers not detected"));
5662 		return;
5663 	}
5664 
5665 	/* save off the SGPIO_CSR I/O address */
5666 	nvc->nvc_sgp_csr = csrp;
5667 
5668 	/* map in Command Block */
5669 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5670 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5671 
5672 	/* initialize the SGPIO h/w */
5673 	if (nv_sgp_init(nvc) == NV_FAILURE) {
5674 		nv_cmn_err(CE_WARN, nvc, NULL,
5675 		    "!Unable to initialize SGPIO");
5676 	}
5677 
5678 	if (nvc->nvc_ctlr_num == 0) {
5679 		/*
		 * Controller 0 on the MCP55/IO55 initializes the SGPIO
		 * and the data that is shared between the controllers.
		 * The clever thing to do would be to let the first controller
		 * that comes up be the one that initializes all this.
		 * However, SGPIO state is not necessarily zeroed between
		 * OS reboots, so there might be old data there.
5686 		 */
5687 
5688 		/* allocate shared space */
5689 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5690 		    KM_SLEEP);
5691 		if (cmn == NULL) {
5692 			nv_cmn_err(CE_WARN, nvc, NULL,
5693 			    "!Failed to allocate shared data");
5694 			return;
5695 		}
5696 
5697 		nvc->nvc_sgp_cmn = cmn;
5698 
5699 		/* initialize the shared data structure */
5700 		cmn->nvs_magic = SGPIO_MAGIC;
5701 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5702 		cmn->nvs_connected = 0;
5703 		cmn->nvs_activity = 0;
5704 
5705 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5706 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5707 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5708 
5709 		/* put the address in the SGPIO scratch register */
5710 #if defined(__amd64)
5711 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5712 #else
5713 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5714 #endif
5715 
5716 		/* start the activity LED taskq */
5717 
5718 		/*
		 * The taskq name should be unique, so the low bits of the
		 * current time are folded into it.
		 */
5721 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5722 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5723 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5724 		    TASKQ_DEFAULTPRI, 0);
5725 		if (cmn->nvs_taskq == NULL) {
5726 			cmn->nvs_taskq_delay = 0;
5727 			nv_cmn_err(CE_WARN, nvc, NULL,
5728 			    "!Failed to start activity LED taskq");
5729 		} else {
5730 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5731 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5732 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5733 		}
5734 
5735 	} else if (nvc->nvc_ctlr_num == 1) {
5736 		/*
		 * Controller 1 confirms that SGPIO has been initialized
		 * and, if so, picks up the shared data pointer now.
		 * Otherwise the shared data pointer is picked up later,
		 * when the data is first accessed.
5740 		 */
5741 
5742 		if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5743 			cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5744 
5745 			/*
5746 			 * It looks like a pointer, but is it the shared data?
5747 			 */
5748 			if (cmn->nvs_magic == SGPIO_MAGIC) {
5749 				nvc->nvc_sgp_cmn = cmn;
5750 
5751 				cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5752 			}
5753 		}
5754 	}
5755 }
5756 
5757 /*
5758  * nv_sgp_detect
 * Read the SGPIO_CSRP and SGPIO_CBP values from PCI config space and
5760  * report back whether both were readable.
5761  */
5762 static int
5763 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5764     uint32_t *cbpp)
5765 {
5766 	/* get the SGPIO_CSRP */
5767 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5768 	if (*csrpp == 0) {
5769 		return (NV_FAILURE);
5770 	}
5771 
5772 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5773 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5774 	if (*cbpp == 0) {
5775 		return (NV_FAILURE);
5776 	}
5777 
5778 	/* SGPIO_CBP is good, so we must support SGPIO */
5779 	return (NV_SUCCESS);
5780 }
5781 
5782 /*
5783  * nv_sgp_init
5784  * Initialize SGPIO.  The process is specified by NVIDIA.
5785  */
5786 static int
5787 nv_sgp_init(nv_ctl_t *nvc)
5788 {
5789 	uint32_t status;
5790 	int drive_count;
5791 
5792 	/*
	 * If the SGPIO status is set to SGPIO_STATE_RESET, the logic has
	 * been reset and needs to be initialized.
5795 	 */
5796 	status = nv_sgp_csr_read(nvc);
5797 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
5798 		if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5799 			/* reset and try again */
5800 			nv_sgp_reset(nvc);
5801 			if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5802 				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5803 				    "SGPIO init failed"));
5804 				return (NV_FAILURE);
5805 			}
5806 		}
5807 	}
5808 
5809 	/*
5810 	 * NVIDIA recommends reading the supported drive count even
5811 	 * though they also indicate that it is 4 at this time.
5812 	 */
5813 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
5814 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
5815 		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5816 		    "SGPIO reported undocumented drive count - %d",
5817 		    drive_count));
5818 	}
5819 
5820 	NVLOG((NVDBG_INIT, nvc, NULL,
5821 	    "initialized ctlr: %d csr: 0x%08x",
5822 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
5823 
5824 	return (NV_SUCCESS);
5825 }
5826 
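/*
 * nv_sgp_reset
 * Issue the SGPIO RESET command and check that the controller accepted it.
 */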
5827 static void
5828 nv_sgp_reset(nv_ctl_t *nvc)
5829 {
5830 	uint32_t cmd;
5831 	uint32_t status;
5832 
5833 	cmd = SGPIO_CMD_RESET;
5834 	nv_sgp_csr_write(nvc, cmd);
5835 
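	/* read back the CSR to see whether the reset command completed OK */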
5836 	status = nv_sgp_csr_read(nvc);
5837 
5838 	if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) {
5839 		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5840 		    "SGPIO reset failed: CSR - 0x%x", status));
5841 	}
5842 }
5843 
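/*
 * nv_sgp_init_cmd
 * Issue the SGPIO READ_PARAMS command and poll the CSR until it completes
 * (the sequence number changes) or the command times out.
 */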
5844 static int
5845 nv_sgp_init_cmd(nv_ctl_t *nvc)
5846 {
5847 	int seq;
5848 	hrtime_t start, end;
5849 	uint32_t status;
5850 	uint32_t cmd;
5851 
5852 	/* get the old sequence value */
5853 	status = nv_sgp_csr_read(nvc);
5854 	seq = SGPIO_CSR_SEQ(status);
5855 
5856 	/* check the state since we have the info anyway */
5857 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
5858 		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5859 		    "SGPIO init_cmd: state not operational"));
5860 	}
5861 
5862 	/* issue command */
5863 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
5864 	nv_sgp_csr_write(nvc, cmd);
5865 
5866 	DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
5867 
5868 	/* poll for completion */
5869 	start = gethrtime();
5870 	end = start + NV_SGP_CMD_TIMEOUT;
5871 	for (;;) {
5872 		status = nv_sgp_csr_read(nvc);
5873 
5874 		/* break on error */
5875 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5876 			break;
5877 
5878 		/* break on command completion (seq changed) */
5879 		if (SGPIO_CSR_SEQ(status) != seq) {
5880 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) {
5881 				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5882 				    "Seq changed but command still active"));
5883 			}
5884 
5885 			break;
5886 		}
5887 
5888 		/* Wait 400 ns and try again */
5889 		NV_DELAY_NSEC(400);
5890 
5891 		if (gethrtime() > end)
5892 			break;
5893 	}
5894 
5895 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
5896 		return (NV_SUCCESS);
5897 
5898 	return (NV_FAILURE);
5899 }
5900 
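/*
 * nv_sgp_check_set_cmn
 * Check whether the SGPIO scratch register holds a valid-looking pointer
 * to the shared data and, if so, record it and mark this controller as
 * using the shared data.
 */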
5901 static int
5902 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
5903 {
5904 	nv_sgp_cmn_t *cmn;
5905 
5906 	/* check to see if Scratch Register is set */
5907 	if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5908 		nvc->nvc_sgp_cmn =
5909 		    (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5910 
5911 		if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC)
5912 			return (NV_FAILURE);
5913 
5914 		cmn = nvc->nvc_sgp_cmn;
5915 
5916 		mutex_enter(&cmn->nvs_slock);
5917 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5918 		mutex_exit(&cmn->nvs_slock);
5919 
5920 		return (NV_SUCCESS);
5921 	}
5922 
5923 	return (NV_FAILURE);
5924 }
5925 
5926 /*
5927  * nv_sgp_csr_read
 * This is just a 32-bit I/O port read.  The port number was obtained from
 * the PCI config space.
 *
 * XXX It was advised to use the in[bwl] functions for this, even though
 * they are obsolete interfaces.
5933  */
5934 static int
5935 nv_sgp_csr_read(nv_ctl_t *nvc)
5936 {
5937 	return (inl(nvc->nvc_sgp_csr));
5938 }
5939 
5940 /*
5941  * nv_sgp_csr_write
5942  * This is just a 32-bit I/O port write.  The port number was obtained from
5943  * the PCI config space.
5944  *
 * XXX It was advised to use the out[bwl] functions for this, even though
 * they are obsolete interfaces.
5947  */
5948 static void
5949 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
5950 {
5951 	outl(nvc->nvc_sgp_csr, val);
5952 }
5953 
5954 /*
5955  * nv_sgp_write_data
5956  * Cause SGPIO to send Command Block data
5957  */
5958 static int
5959 nv_sgp_write_data(nv_ctl_t *nvc)
5960 {
5961 	hrtime_t start, end;
5962 	uint32_t status;
5963 	uint32_t cmd;
5964 
5965 	/* issue command */
5966 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
5967 	nv_sgp_csr_write(nvc, cmd);
5968 
5969 	/* poll for completion */
5970 	start = gethrtime();
5971 	end = start + NV_SGP_CMD_TIMEOUT;
5972 	for (;;) {
5973 		status = nv_sgp_csr_read(nvc);
5974 
5975 		/* break on error completion */
5976 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5977 			break;
5978 
5979 		/* break on successful completion */
5980 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
5981 			break;
5982 
5983 		/* Wait 400 ns and try again */
5984 		NV_DELAY_NSEC(400);
5985 
5986 		if (gethrtime() > end)
5987 			break;
5988 	}
5989 
5990 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
5991 		return (NV_SUCCESS);
5992 
5993 	return (NV_FAILURE);
5994 }
5995 
5996 /*
5997  * nv_sgp_activity_led_ctl
 * This is run as a task in a taskq.  It wakes up at a fixed interval and
 * checks to see if any of the activity LEDs need to be changed.
6000  */
6001 static void
6002 nv_sgp_activity_led_ctl(void *arg)
6003 {
6004 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6005 	nv_sgp_cmn_t *cmn;
6006 	volatile nv_sgp_cb_t *cbp;
6007 	clock_t ticks;
6008 	uint8_t drv_leds;
6009 	uint32_t old_leds;
6010 	uint32_t new_led_state;
6011 	int i;
6012 
6013 	cmn = nvc->nvc_sgp_cmn;
6014 	cbp = nvc->nvc_sgp_cbp;
6015 
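	/*
	 * Loop until the taskq delay is cleared (nv_sgp_cleanup() sets it
	 * to 0 to allow this task to exit).
	 */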
6016 	do {
6017 		/* save off the old state of all of the LEDs */
6018 		old_leds = cbp->sgpio0_tr;
6019 
6020 		DTRACE_PROBE3(sgpio__activity__state,
6021 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6022 		    int, old_leds);
6023 
6024 		new_led_state = 0;
6025 
6026 		/* for each drive */
6027 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6028 
6029 			/* get the current state of the LEDs for the drive */
6030 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6031 
6032 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6033 				/* if not connected, turn off activity */
6034 				drv_leds &= ~TR_ACTIVE_MASK;
6035 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6036 
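				/*
				 * clear this drive's bits in the new LED
				 * state, then merge in the updated value
				 */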
6037 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6038 				new_led_state |=
6039 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6040 
6041 				continue;
6042 			}
6043 
6044 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6045 				/* connected, but not active */
6046 				drv_leds &= ~TR_ACTIVE_MASK;
6047 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6048 
6049 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6050 				new_led_state |=
6051 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6052 
6053 				continue;
6054 			}
6055 
6056 			/* connected and active */
6057 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6058 				/* was enabled, so disable */
6059 				drv_leds &= ~TR_ACTIVE_MASK;
6060 				drv_leds |=
6061 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6062 
6063 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6064 				new_led_state |=
6065 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6066 			} else {
6067 				/* was disabled, so enable */
6068 				drv_leds &= ~TR_ACTIVE_MASK;
6069 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6070 
6071 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6072 				new_led_state |=
6073 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6074 			}
6075 
6076 			/*
			 * Clear the activity bit.  If there is drive
			 * activity again within the loop interval
			 * (currently 1/16 second), nvs_activity will be
			 * set again by the command start path and the
			 * "connected and active" case above will cause
			 * the LED to blink off and on at the loop
			 * interval rate.  The rate may be increased
			 * (interval shortened) as long as the interval
			 * is no shorter than 1/30 second.
6085 			 */
6086 			mutex_enter(&cmn->nvs_slock);
6087 			cmn->nvs_activity &= ~(1 << i);
6088 			mutex_exit(&cmn->nvs_slock);
6089 		}
6090 
6091 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6092 
6093 		/* write out LED values */
6094 
6095 		mutex_enter(&cmn->nvs_slock);
6096 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6097 		cbp->sgpio0_tr |= new_led_state;
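		/* keep the SGPIO enable bits set in control register 0 */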
6098 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6099 		mutex_exit(&cmn->nvs_slock);
6100 
6101 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6102 			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6103 			    "nv_sgp_write_data failure updating active LED"));
6104 		}
6105 
6106 		/* now rest for the interval */
6107 		mutex_enter(&cmn->nvs_tlock);
6108 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6109 		if (ticks > 0)
6110 			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6111 			    ddi_get_lbolt() + ticks);
6112 		mutex_exit(&cmn->nvs_tlock);
6113 	} while (ticks > 0);
6114 }
6115 
6116 /*
6117  * nv_sgp_drive_connect
6118  * Set the flag used to indicate that the drive is attached to the HBA.
6119  * Used to let the taskq know that it should turn the Activity LED on.
6120  */
6121 static void
6122 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6123 {
6124 	nv_sgp_cmn_t *cmn;
6125 
6126 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6127 		return;
6128 	cmn = nvc->nvc_sgp_cmn;
6129 
6130 	mutex_enter(&cmn->nvs_slock);
6131 	cmn->nvs_connected |= (1 << drive);
6132 	mutex_exit(&cmn->nvs_slock);
6133 }
6134 
6135 /*
6136  * nv_sgp_drive_disconnect
 * Clears the flag used to indicate that the drive is attached to the
 * HBA.  Used to let the taskq know that it should turn the Activity
 * LED off.  The flag that indicates that the drive is active is also
 * cleared.
6141  */
6142 static void
6143 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6144 {
6145 	nv_sgp_cmn_t *cmn;
6146 
6147 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6148 		return;
6149 	cmn = nvc->nvc_sgp_cmn;
6150 
6151 	mutex_enter(&cmn->nvs_slock);
6152 	cmn->nvs_connected &= ~(1 << drive);
6153 	cmn->nvs_activity &= ~(1 << drive);
6154 	mutex_exit(&cmn->nvs_slock);
6155 }
6156 
6157 /*
6158  * nv_sgp_drive_active
6159  * Sets the flag used to indicate that the drive has been accessed and the
6160  * LED should be flicked off, then on.  It is cleared at a fixed time
6161  * interval by the LED taskq and set by the sata command start.
6162  */
6163 static void
6164 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6165 {
6166 	nv_sgp_cmn_t *cmn;
6167 
6168 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6169 		return;
6170 	cmn = nvc->nvc_sgp_cmn;
6171 
6172 	DTRACE_PROBE1(sgpio__active, int, drive);
6173 
6174 	mutex_enter(&cmn->nvs_slock);
6175 	cmn->nvs_connected |= (1 << drive);
6176 	cmn->nvs_activity |= (1 << drive);
6177 	mutex_exit(&cmn->nvs_slock);
6178 }
6179 
6180 
6181 /*
6182  * nv_sgp_locate
6183  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6184  * maintained in the SGPIO Command Block.
6185  */
6186 static void
6187 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6188 {
6189 	uint8_t leds;
6190 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6191 	nv_sgp_cmn_t *cmn;
6192 
6193 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6194 		return;
6195 	cmn = nvc->nvc_sgp_cmn;
6196 
6197 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6198 		return;
6199 
6200 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6201 
6202 	mutex_enter(&cmn->nvs_slock);
6203 
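	/* get the current LED bits for this drive */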
6204 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6205 
6206 	leds &= ~TR_LOCATE_MASK;
6207 	leds |= TR_LOCATE_SET(value);
6208 
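	/* merge the updated bits back into the transmit register */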
6209 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6210 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6211 
6212 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6213 
6214 	mutex_exit(&cmn->nvs_slock);
6215 
6216 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6217 		nv_cmn_err(CE_WARN, nvc, NULL,
6218 		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6219 	}
6220 }
6221 
6222 /*
6223  * nv_sgp_error
6224  * Turns the Error/Failure LED off or on for a particular drive.  State is
6225  * maintained in the SGPIO Command Block.
6226  */
6227 static void
6228 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6229 {
6230 	uint8_t leds;
6231 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6232 	nv_sgp_cmn_t *cmn;
6233 
6234 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6235 		return;
6236 	cmn = nvc->nvc_sgp_cmn;
6237 
6238 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6239 		return;
6240 
6241 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
6242 
6243 	mutex_enter(&cmn->nvs_slock);
6244 
6245 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6246 
6247 	leds &= ~TR_ERROR_MASK;
6248 	leds |= TR_ERROR_SET(value);
6249 
6250 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6251 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6252 
6253 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6254 
6255 	mutex_exit(&cmn->nvs_slock);
6256 
6257 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6258 		nv_cmn_err(CE_WARN, nvc, NULL,
6259 		    "!nv_sgp_write_data failure updating Fail/Error LED");
6260 	}
6261 }
6262 
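/*
 * nv_sgp_cleanup
 * Turn off this controller's activity LEDs and clear its "in use" bit.
 * When the last controller's bit is cleared, stop the activity LED taskq,
 * turn off all LEDs, and free the shared SGPIO data.
 */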
6263 static void
6264 nv_sgp_cleanup(nv_ctl_t *nvc)
6265 {
6266 	int drive;
6267 	uint8_t drv_leds;
6268 	uint32_t led_state;
6269 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6270 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6271 	extern void psm_unmap_phys(caddr_t, size_t);
6272 
6273 	/* turn off activity LEDs for this controller */
6274 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6275 
6276 	/* get the existing LED state */
6277 	led_state = cb->sgpio0_tr;
6278 
6279 	/* turn off port 0 */
6280 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6281 	led_state &= SGPIO0_TR_DRV_CLR(drive);
6282 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6283 
6284 	/* turn off port 1 */
6285 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6286 	led_state &= SGPIO0_TR_DRV_CLR(drive);
6287 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6288 
	/* set the new led state, which should turn off this ctrl's LEDs */
	cb->sgpio0_tr = led_state;
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
	(void) nv_sgp_write_data(nvc);
6292 
6293 	/* clear the controller's in use bit */
6294 	mutex_enter(&cmn->nvs_slock);
6295 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6296 	mutex_exit(&cmn->nvs_slock);
6297 
6298 	if (cmn->nvs_in_use == 0) {
6299 		/* if all "in use" bits cleared, take everything down */
6300 
6301 		if (cmn->nvs_taskq != NULL) {
6302 			/* allow activity taskq to exit */
6303 			cmn->nvs_taskq_delay = 0;
6304 			cv_broadcast(&cmn->nvs_cv);
6305 
			/* then destroy it (waits for the task to exit) */
6307 			ddi_taskq_destroy(cmn->nvs_taskq);
6308 		}
6309 
6310 		/* turn off all of the LEDs */
6311 		cb->sgpio0_tr = 0;
6312 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6313 		(void) nv_sgp_write_data(nvc);
6314 
		cb->sgpio_sr = 0;
6316 
6317 		/* free resources */
6318 		cv_destroy(&cmn->nvs_cv);
6319 		mutex_destroy(&cmn->nvs_tlock);
6320 		mutex_destroy(&cmn->nvs_slock);
6321 
6322 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6323 	}
6324 
6325 	nvc->nvc_sgp_cmn = NULL;
6326 
6327 	/* unmap the SGPIO Command Block */
6328 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6329 }
6330 #endif	/* SGPIO_SUPPORT */
6331