1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *
29  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30  * based chipsets.
31  *
32  * NCQ
33  * ---
34  *
35  * A portion of the NCQ support is in place, but is incomplete.  NCQ is disabled
36  * and is likely to be revisited in the future.
37  *
38  *
39  * Power Management
40  * ----------------
41  *
42  * Normally power management would be responsible for ensuring the device
43  * is quiescent and then changing power states to the device, such as
44  * powering down parts or all of the device.  mcp5x/ck804 is unique in
45  * that it is only available as part of a larger southbridge chipset, so
46  * removing power to the device isn't possible.  Switches to control
47  * power management states D0/D3 in the PCI configuration space appear to
48  * be supported but changes to these states are apparently ignored.
49  * The only further PM that the driver _could_ do is shut down the PHY,
50  * but in order to deliver the first rev of the driver sooner than later,
51  * that will be deferred until some future phase.
52  *
53  * Since the driver currently will not directly change any power state to
54  * the device, no power() entry point will be required.  However, it is
55  * possible that in ACPI power state S3, aka suspend to RAM, power
56  * can be removed from the device, and the driver cannot rely on BIOS to
57  * have reset any state.  For the time being, there are no known
58  * non-default configurations that need to be programmed.  This judgement
59  * is based on the port of the legacy ata driver not having any such
60  * functionality and based on conversations with the PM team.  If such a
61  * restoration is later deemed necessary it can be incorporated into the
62  * DDI_RESUME processing.
63  *
64  */
65 
66 #include <sys/scsi/scsi.h>
67 #include <sys/pci.h>
68 #include <sys/byteorder.h>
69 #include <sys/sunddi.h>
70 #include <sys/sata/sata_hba.h>
71 #ifdef SGPIO_SUPPORT
72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73 #include <sys/devctl.h>
74 #include <sys/sdt.h>
75 #endif
76 #include <sys/sata/adapters/nv_sata/nv_sata.h>
77 #include <sys/disp.h>
78 #include <sys/note.h>
79 #include <sys/promif.h>
80 
81 
82 /*
83  * Function prototypes for driver entry points
84  */
85 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87 static int nv_quiesce(dev_info_t *dip);
88 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89     void *arg, void **result);
90 
91 /*
92  * Function prototypes for entry points from sata service module
93  * These functions are distinguished from other local functions
94  * by the prefix "nv_sata_"
95  */
96 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101 
102 /*
103  * Local function prototypes
104  */
105 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108 #ifdef NV_MSI_SUPPORTED
109 static int nv_add_msi_intrs(nv_ctl_t *nvc);
110 #endif
111 static void nv_rem_intrs(nv_ctl_t *nvc);
112 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113 static int nv_start_nodata(nv_port_t *nvp, int slot);
114 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115 static int nv_start_pio_in(nv_port_t *nvp, int slot);
116 static int nv_start_pio_out(nv_port_t *nvp, int slot);
117 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121 static int nv_start_dma(nv_port_t *nvp, int slot);
122 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
124 static void nv_uninit_ctl(nv_ctl_t *nvc);
125 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
127 static void nv_uninit_port(nv_port_t *nvp);
128 static int nv_init_port(nv_port_t *nvp);
129 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
130 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #ifdef NCQ
132 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
133 #endif
134 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
135 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
136     int state);
137 static void nv_common_reg_init(nv_ctl_t *nvc);
138 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
139 static void nv_reset(nv_port_t *nvp);
140 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
141 static void nv_timeout(void *);
142 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
143 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
144 static void nv_read_signature(nv_port_t *nvp);
145 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
146 static void ck804_set_intr(nv_port_t *nvp, int flag);
147 static void nv_resume(nv_port_t *nvp);
148 static void nv_suspend(nv_port_t *nvp);
149 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
150 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
151     int flag);
152 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
153     sata_pkt_t *spkt);
154 static void nv_report_add_remove(nv_port_t *nvp, int flags);
155 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
156 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
157     uchar_t failure_onbits2, uchar_t failure_offbits2,
158     uchar_t failure_onbits3, uchar_t failure_offbits3,
159     uint_t timeout_usec, int type_wait);
160 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
161     uint_t timeout_usec, int type_wait);
162 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163 static void nv_init_port_link_processing(nv_ctl_t *nvc);
164 static void nv_setup_timeout(nv_port_t *nvp, int time);
165 static void nv_monitor_reset(nv_port_t *nvp);
166 static int nv_bm_status_clear(nv_port_t *nvp);
167 
168 #ifdef SGPIO_SUPPORT
169 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
170 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
171 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
172     cred_t *credp, int *rvalp);
173 
174 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
175 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
176     uint32_t *cbpp);
177 static int nv_sgp_init(nv_ctl_t *nvc);
178 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
179 static int nv_sgp_csr_read(nv_ctl_t *nvc);
180 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
181 static int nv_sgp_write_data(nv_ctl_t *nvc);
182 static void nv_sgp_activity_led_ctl(void *arg);
183 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
185 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
186 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
187 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
188 static void nv_sgp_cleanup(nv_ctl_t *nvc);
189 #endif
190 
191 
192 /*
193  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
194  * Verify whether it is needed if ported to another ISA.
195  */
196 static ddi_dma_attr_t buffer_dma_attr = {
197 	DMA_ATTR_V0,		/* dma_attr_version */
198 	0,			/* dma_attr_addr_lo: lowest bus address */
199 	0xffffffffull,		/* dma_attr_addr_hi: */
200 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
201 	4,			/* dma_attr_align */
202 	1,			/* dma_attr_burstsizes. */
203 	1,			/* dma_attr_minxfer */
204 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
205 	0xffffffffull,		/* dma_attr_seg */
206 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
207 	512,			/* dma_attr_granular */
208 	0,			/* dma_attr_flags */
209 };
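
/*
 * Same as buffer_dma_attr above, except that dma_attr_addr_hi allows
 * 40-bit addressing; intended for use when nv_sata_40bit_dma (below)
 * is enabled.
 */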
210 static ddi_dma_attr_t buffer_dma_40bit_attr = {
211 	DMA_ATTR_V0,		/* dma_attr_version */
212 	0,			/* dma_attr_addr_lo: lowest bus address */
213 	0xffffffffffull,	/* dma_attr_addr_hi: */
214 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
215 	4,			/* dma_attr_align */
216 	1,			/* dma_attr_burstsizes. */
217 	1,			/* dma_attr_minxfer */
218 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
219 	0xffffffffull,		/* dma_attr_seg */
220 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
221 	512,			/* dma_attr_granular */
222 	0,			/* dma_attr_flags */
223 };
224 
225 
226 /*
227  * DMA attributes for PRD tables
228  */
229 ddi_dma_attr_t nv_prd_dma_attr = {
230 	DMA_ATTR_V0,		/* dma_attr_version */
231 	0,			/* dma_attr_addr_lo */
232 	0xffffffffull,		/* dma_attr_addr_hi */
233 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
234 	4,			/* dma_attr_align */
235 	1,			/* dma_attr_burstsizes */
236 	1,			/* dma_attr_minxfer */
237 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
238 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
239 	1,			/* dma_attr_sgllen */
240 	1,			/* dma_attr_granular */
241 	0			/* dma_attr_flags */
242 };
243 
244 /*
245  * Device access attributes
246  */
247 static ddi_device_acc_attr_t accattr = {
248     DDI_DEVICE_ATTR_V0,
249     DDI_STRUCTURE_LE_ACC,
250     DDI_STRICTORDER_ACC
251 };
252 
253 
254 #ifdef SGPIO_SUPPORT
255 static struct cb_ops nv_cb_ops = {
256 	nv_open,		/* open */
257 	nv_close,		/* close */
258 	nodev,			/* strategy (block) */
259 	nodev,			/* print (block) */
260 	nodev,			/* dump (block) */
261 	nodev,			/* read */
262 	nodev,			/* write */
263 	nv_ioctl,		/* ioctl */
264 	nodev,			/* devmap */
265 	nodev,			/* mmap */
266 	nodev,			/* segmap */
267 	nochpoll,		/* chpoll */
268 	ddi_prop_op,		/* prop_op */
269 	NULL,			/* streams */
270 	D_NEW | D_MP |
271 	D_64BIT | D_HOTPLUG,	/* flags */
272 	CB_REV			/* rev */
273 };
274 #endif  /* SGPIO_SUPPORT */
275 
276 
277 static struct dev_ops nv_dev_ops = {
278 	DEVO_REV,		/* devo_rev */
279 	0,			/* refcnt  */
280 	nv_getinfo,		/* info */
281 	nulldev,		/* identify */
282 	nulldev,		/* probe */
283 	nv_attach,		/* attach */
284 	nv_detach,		/* detach */
285 	nodev,			/* no reset */
286 #ifdef SGPIO_SUPPORT
287 	&nv_cb_ops,		/* driver operations */
288 #else
289 	(struct cb_ops *)0,	/* driver operations */
290 #endif
291 	NULL,			/* bus operations */
292 	NULL,			/* power */
293 	nv_quiesce		/* quiesce */
294 };
295 
296 
297 /*
298  * Request Sense CDB for ATAPI
299  */
300 static const uint8_t nv_rqsense_cdb[16] = {
301 	SCMD_REQUEST_SENSE,
302 	0,
303 	0,
304 	0,
305 	SATA_ATAPI_MIN_RQSENSE_LEN,
306 	0,
307 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
308 };
309 
310 
311 static sata_tran_hotplug_ops_t nv_hotplug_ops;
312 
313 extern struct mod_ops mod_driverops;
314 
315 static  struct modldrv modldrv = {
316 	&mod_driverops,	/* driverops */
317 	"Nvidia ck804/mcp51/mcp55 HBA",
318 	&nv_dev_ops,	/* driver ops */
319 };
320 
321 static  struct modlinkage modlinkage = {
322 	MODREV_1,
323 	&modldrv,
324 	NULL
325 };
326 
327 
328 /*
329  * Wait for a signature.
330  * If this variable is non-zero, the driver will wait for a device signature
331  * before reporting a device reset to the sata module.
332  * Some (most?) drives will not process commands sent to them before the D2H
333  * FIS is sent to the host.
334  */
335 int nv_wait_for_signature = 1;
336 
337 /*
338  * Check for signature availability.
339  * If this variable is non-zero, the driver will check the task file error
340  * register for an indication of signature availability before reading it.
341  * Task file error register bit 0 set to 1 indicates that the drive
342  * is ready and it has sent the D2H FIS with a signature.
343  * This behavior of the error register is not reliable in the mcp5x controller.
344  */
345 int nv_check_tfr_error = 0;
346 
347 /*
348  * Max signature acquisition time, in milliseconds.
349  * The driver will try to acquire a device signature within the specified time
350  * and quit the acquisition operation if the signature was not acquired.
351  */
352 long nv_sig_acquisition_time = NV_SIG_ACQUISITION_TIME;
353 
354 /*
355  * If this variable is non-zero, the driver will wait for a signature in the
356  * nv_monitor_reset function without any time limit.
357  * Used for debugging and drive evaluation.
358  */
359 int nv_wait_here_forever = 0;
360 
361 /*
362  * Reset after hotplug.
363  * If this variable is non-zero, the driver will reset the device after a
364  * hotplug (device attached) interrupt.
365  * If the variable is zero, the driver will not reset the new device, nor
366  * will it try to read the device signature.
367  * The chipset generates a hotplug (device attached) interrupt with a delay, so
368  * the device should have already sent the D2H FIS with the signature.
369  */
370 int nv_reset_after_hotplug = 1;
371 
372 /*
373  * Delay after device hotplug.
374  * It specifies the time between detecting a hotplugged device and sending
375  * a notification to the SATA module.
376  * It is used when the device is not reset after hotplugging and acquiring the
377  * signature may be unreliable.  The delay should be long enough for a device
378  * to become ready to accept commands.
379  */
380 int nv_hotplug_delay = NV_HOTPLUG_DELAY;
381 
382 
383 /*
384  * Maximum number of consecutive interrupts processed in the loop in the
385  * single invocation of the port interrupt routine.
386  */
387 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
388 
389 
390 
391 /*
392  * wait between checks of reg status
393  */
394 int nv_usec_delay = NV_WAIT_REG_CHECK;
395 
396 /*
397  * The following is needed for nv_vcmn_err()
398  */
399 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
400 static char nv_log_buf[NV_STRING_512];
401 int nv_debug_flags = NVDBG_ALWAYS;
402 int nv_log_to_console = B_FALSE;
403 
404 int nv_log_delay = 0;
405 int nv_prom_print = B_FALSE;
406 
407 /*
408  * for debugging
409  */
410 #ifdef DEBUG
411 int ncq_commands = 0;
412 int non_ncq_commands = 0;
413 #endif
414 
415 /*
416  * Opaque state pointer to be initialized by ddi_soft_state_init()
417  */
418 static void *nv_statep	= NULL;
419 
420 /*
421  * Map from CBP to shared space
422  *
423  * When an MCP55/IO55 part supports SGPIO, there is a single CBP (SGPIO
424  * Control Block Pointer as well as the corresponding Control Block) that
425  * is shared across all driver instances associated with that part.  The
426  * Control Block is used to update and query the LED state for the devices
427  * on the controllers associated with those instances.  There is also some
428  * driver state (called the 'common' area here) associated with each SGPIO
429  * Control Block.  The nv_sgp_cbp2cmn array is used to map a given CBP to its
430  * control area.
431  *
432  * The driver can also use this mapping array to determine whether the
433  * common area for a given CBP has been initialized, and, if it isn't
434  * initialized, initialize it.
435  *
436  * When a driver instance with a CBP value that is already in the array is
437  * initialized, it will use the pointer to the previously initialized common
438  * area associated with that SGPIO CBP value, rather than initialize it
439  * itself.
440  *
441  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
442  */
443 #ifdef SGPIO_SUPPORT
444 static kmutex_t nv_sgp_c2c_mutex;
445 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
446 #endif
447 
448 /* We still have problems with 40-bit DMA support, so disable it by default */
449 int nv_sata_40bit_dma = B_FALSE;
450 
451 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
452 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
453 	nv_sata_activate,	/* activate port. cfgadm -c connect */
454 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
455 };
456 
457 
458 /*
459  *  nv module initialization
460  */
461 int
462 _init(void)
463 {
464 	int	error;
465 #ifdef SGPIO_SUPPORT
466 	int	i;
467 #endif
468 
469 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
470 
471 	if (error != 0) {
472 
473 		return (error);
474 	}
475 
476 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
477 #ifdef SGPIO_SUPPORT
478 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
479 
480 	for (i = 0; i < NV_MAX_CBPS; i++) {
481 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
482 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
483 	}
484 #endif
485 
486 	if ((error = sata_hba_init(&modlinkage)) != 0) {
487 		ddi_soft_state_fini(&nv_statep);
488 		mutex_destroy(&nv_log_mutex);
489 
490 		return (error);
491 	}
492 
493 	error = mod_install(&modlinkage);
494 	if (error != 0) {
495 		sata_hba_fini(&modlinkage);
496 		ddi_soft_state_fini(&nv_statep);
497 		mutex_destroy(&nv_log_mutex);
498 
499 		return (error);
500 	}
501 
502 	return (error);
503 }
504 
505 
506 /*
507  * nv module uninitialize
508  */
509 int
510 _fini(void)
511 {
512 	int	error;
513 
514 	error = mod_remove(&modlinkage);
515 
516 	if (error != 0) {
517 		return (error);
518 	}
519 
520 	/*
521 	 * remove the resources allocated in _init()
522 	 */
523 	mutex_destroy(&nv_log_mutex);
524 #ifdef SGPIO_SUPPORT
525 	mutex_destroy(&nv_sgp_c2c_mutex);
526 #endif
527 	sata_hba_fini(&modlinkage);
528 	ddi_soft_state_fini(&nv_statep);
529 
530 	return (error);
531 }
532 
533 
534 /*
535  * nv _info entry point
536  */
537 int
538 _info(struct modinfo *modinfop)
539 {
540 	return (mod_info(&modlinkage, modinfop));
541 }
542 
543 
544 /*
545  * These wrappers for ddi_{get,put}{8,16,32} are for observability
546  * with dtrace.
547  */
548 #ifdef DEBUG
549 
550 static void
551 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
552 {
553 	ddi_put8(handle, dev_addr, value);
554 }
555 
556 static void
557 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
558 {
559 	ddi_put32(handle, dev_addr, value);
560 }
561 
562 static uint32_t
563 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
564 {
565 	return (ddi_get32(handle, dev_addr));
566 }
567 
568 static void
569 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
570 {
571 	ddi_put16(handle, dev_addr, value);
572 }
573 
574 static uint16_t
575 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
576 {
577 	return (ddi_get16(handle, dev_addr));
578 }
579 
580 static uint8_t
581 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
582 {
583 	return (ddi_get8(handle, dev_addr));
584 }
585 
586 #else
587 
588 #define	nv_put8 ddi_put8
589 #define	nv_put32 ddi_put32
590 #define	nv_get32 ddi_get32
591 #define	nv_put16 ddi_put16
592 #define	nv_get16 ddi_get16
593 #define	nv_get8 ddi_get8
594 
595 #endif
596 
597 
598 /*
599  * Driver attach
600  */
601 static int
602 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
603 {
604 	int status, attach_state, intr_types, bar, i, command;
605 	int inst = ddi_get_instance(dip);
606 	ddi_acc_handle_t pci_conf_handle;
607 	nv_ctl_t *nvc;
608 	uint8_t subclass;
609 	uint32_t reg32;
610 #ifdef SGPIO_SUPPORT
611 	pci_regspec_t *regs;
612 	int rlen;
613 #endif
614 
615 	switch (cmd) {
616 
617 	case DDI_ATTACH:
618 
619 		NVLOG((NVDBG_INIT, NULL, NULL,
620 		    "nv_attach(): DDI_ATTACH inst %d", inst));
621 
622 		attach_state = ATTACH_PROGRESS_NONE;
623 
624 		status = ddi_soft_state_zalloc(nv_statep, inst);
625 
626 		if (status != DDI_SUCCESS) {
627 			break;
628 		}
629 
630 		nvc = ddi_get_soft_state(nv_statep, inst);
631 
632 		nvc->nvc_dip = dip;
633 
634 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
635 
636 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
637 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
638 			    PCI_CONF_REVID);
639 			NVLOG((NVDBG_INIT, NULL, NULL,
640 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
641 			    inst, nvc->nvc_revid, nv_debug_flags));
642 		} else {
643 			break;
644 		}
645 
646 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
647 
648 		/*
649 		 * Set the PCI command register: enable IO/MEM/Master.
650 		 */
651 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
652 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
653 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
654 
655 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
656 
657 		if (subclass & PCI_MASS_RAID) {
658 			cmn_err(CE_WARN,
659 			    "attach failed: RAID mode not supported");
660 			break;
661 		}
662 
663 		/*
664 		 * the 6 bars of the controller are:
665 		 * 0: port 0 task file
666 		 * 1: port 0 status
667 		 * 2: port 1 task file
668 		 * 3: port 1 status
669 		 * 4: bus master for both ports
670 		 * 5: extended registers for SATA features
671 		 */
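		/*
		 * Note: index 0 of the "reg" property is the PCI config
		 * space entry, so the memory/IO BARs are mapped starting
		 * at regs index 1 (hence bar + 1 below).
		 */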
672 		for (bar = 0; bar < 6; bar++) {
673 			status = ddi_regs_map_setup(dip, bar + 1,
674 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
675 			    &nvc->nvc_bar_hdl[bar]);
676 
677 			if (status != DDI_SUCCESS) {
678 				NVLOG((NVDBG_INIT, nvc, NULL,
679 				    "ddi_regs_map_setup failure for bar"
680 				    " %d status = %d", bar, status));
681 				break;
682 			}
683 		}
684 
685 		attach_state |= ATTACH_PROGRESS_BARS;
686 
687 		/*
688 		 * initialize controller structures
689 		 */
690 		status = nv_init_ctl(nvc, pci_conf_handle);
691 
692 		if (status == NV_FAILURE) {
693 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
694 
695 			break;
696 		}
697 
698 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
699 
700 		/*
701 		 * initialize mutexes
702 		 */
703 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
704 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
705 
706 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
707 
708 		/*
709 		 * get supported interrupt types
710 		 */
711 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
712 		    DDI_SUCCESS) {
713 			nv_cmn_err(CE_WARN, nvc, NULL,
714 			    "!ddi_intr_get_supported_types failed");
715 			NVLOG((NVDBG_INIT, nvc, NULL,
716 			    "interrupt supported types failed"));
717 
718 			break;
719 		}
720 
721 		NVLOG((NVDBG_INIT, nvc, NULL,
722 		    "ddi_intr_get_supported_types() returned: 0x%x",
723 		    intr_types));
724 
725 #ifdef NV_MSI_SUPPORTED
726 		if (intr_types & DDI_INTR_TYPE_MSI) {
727 			NVLOG((NVDBG_INIT, nvc, NULL,
728 			    "using MSI interrupt type"));
729 
730 			/*
731 			 * Try MSI first, but fall back to legacy if MSI
732 			 * attach fails
733 			 */
734 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
735 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
736 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
737 				NVLOG((NVDBG_INIT, nvc, NULL,
738 				    "MSI interrupt setup done"));
739 			} else {
740 				nv_cmn_err(CE_CONT, nvc, NULL,
741 				    "!MSI registration failed "
742 				    "will try Legacy interrupts");
743 			}
744 		}
745 #endif
746 
747 		/*
748 		 * Either the MSI interrupt setup has failed or only
749 		 * the fixed interrupts are available on the system.
750 		 */
751 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
752 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
753 
754 			NVLOG((NVDBG_INIT, nvc, NULL,
755 			    "using Legacy interrupt type"));
756 
757 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
758 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
759 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
760 				NVLOG((NVDBG_INIT, nvc, NULL,
761 				    "Legacy interrupt setup done"));
762 			} else {
763 				nv_cmn_err(CE_WARN, nvc, NULL,
764 				    "!legacy interrupt setup failed");
765 				NVLOG((NVDBG_INIT, nvc, NULL,
766 				    "legacy interrupt setup failed"));
767 				break;
768 			}
769 		}
770 
771 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
772 			NVLOG((NVDBG_INIT, nvc, NULL,
773 			    "no interrupts registered"));
774 			break;
775 		}
776 
777 #ifdef SGPIO_SUPPORT
778 		/*
779 		 * save off the controller number
780 		 */
781 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
782 		    "reg", (caddr_t)&regs, &rlen);
783 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
784 		kmem_free(regs, rlen);
785 
786 		/*
787 		 * initialize SGPIO
788 		 */
789 		nv_sgp_led_init(nvc, pci_conf_handle);
790 #endif	/* SGPIO_SUPPORT */
791 
792 		/*
793 		 * Initiate link processing and device identification
794 		 */
795 		nv_init_port_link_processing(nvc);
796 		/*
797 		 * attach to sata module
798 		 */
799 		if (sata_hba_attach(nvc->nvc_dip,
800 		    &nvc->nvc_sata_hba_tran,
801 		    DDI_ATTACH) != DDI_SUCCESS) {
802 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
803 
804 			break;
805 		}
806 
807 		pci_config_teardown(&pci_conf_handle);
808 
809 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
810 
811 		return (DDI_SUCCESS);
812 
813 	case DDI_RESUME:
814 
815 		nvc = ddi_get_soft_state(nv_statep, inst);
816 
817 		NVLOG((NVDBG_INIT, nvc, NULL,
818 		    "nv_attach(): DDI_RESUME inst %d", inst));
819 
820 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
821 			return (DDI_FAILURE);
822 		}
823 
824 		/*
825 		 * Set the PCI command register: enable IO/MEM/Master.
826 		 */
827 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
828 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
829 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
830 
831 		/*
832 		 * Need to set bit 2 to 1 at config offset 0x50
833 		 * to enable access to the bar5 registers.
834 		 */
835 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
836 
837 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
838 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
839 			    reg32 | NV_BAR5_SPACE_EN);
840 		}
841 
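		/*
		 * Clear the controller suspend flag (set in DDI_SUSPEND)
		 * and resume each port.
		 */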
842 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
843 
844 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
845 			nv_resume(&(nvc->nvc_port[i]));
846 		}
847 
848 		pci_config_teardown(&pci_conf_handle);
849 
850 		return (DDI_SUCCESS);
851 
852 	default:
853 		return (DDI_FAILURE);
854 	}
855 
856 
857 	/*
858 	 * DDI_ATTACH failure path starts here
859 	 */
860 
861 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
862 		nv_rem_intrs(nvc);
863 	}
864 
865 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
866 		/*
867 		 * Remove timers
868 		 */
869 		int port = 0;
870 		nv_port_t *nvp;
871 
872 		for (; port < NV_MAX_PORTS(nvc); port++) {
873 			nvp = &(nvc->nvc_port[port]);
874 			if (nvp->nvp_timeout_id != 0) {
875 				(void) untimeout(nvp->nvp_timeout_id);
876 			}
877 		}
878 	}
879 
880 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
881 		mutex_destroy(&nvc->nvc_mutex);
882 	}
883 
884 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
885 		nv_uninit_ctl(nvc);
886 	}
887 
888 	if (attach_state & ATTACH_PROGRESS_BARS) {
889 		while (--bar >= 0) {
890 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
891 		}
892 	}
893 
894 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
895 		ddi_soft_state_free(nv_statep, inst);
896 	}
897 
898 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
899 		pci_config_teardown(&pci_conf_handle);
900 	}
901 
902 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
903 
904 	return (DDI_FAILURE);
905 }
906 
907 
908 static int
909 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
910 {
911 	int i, port, inst = ddi_get_instance(dip);
912 	nv_ctl_t *nvc;
913 	nv_port_t *nvp;
914 
915 	nvc = ddi_get_soft_state(nv_statep, inst);
916 
917 	switch (cmd) {
918 
919 	case DDI_DETACH:
920 
921 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
922 
923 		/*
924 		 * Remove interrupts
925 		 */
926 		nv_rem_intrs(nvc);
927 
928 		/*
929 		 * Remove timers
930 		 */
931 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
932 			nvp = &(nvc->nvc_port[port]);
933 			if (nvp->nvp_timeout_id != 0) {
934 				(void) untimeout(nvp->nvp_timeout_id);
935 			}
936 		}
937 
938 		/*
939 		 * Remove maps
940 		 */
941 		for (i = 0; i < 6; i++) {
942 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
943 		}
944 
945 		/*
946 		 * Destroy mutexes
947 		 */
948 		mutex_destroy(&nvc->nvc_mutex);
949 
950 		/*
951 		 * Uninitialize the controller structures
952 		 */
953 		nv_uninit_ctl(nvc);
954 
955 #ifdef SGPIO_SUPPORT
956 		/*
957 		 * release SGPIO resources
958 		 */
959 		nv_sgp_cleanup(nvc);
960 #endif
961 
962 		/*
963 		 * unregister from the sata module
964 		 */
965 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
966 
967 		/*
968 		 * Free soft state
969 		 */
970 		ddi_soft_state_free(nv_statep, inst);
971 
972 		return (DDI_SUCCESS);
973 
974 	case DDI_SUSPEND:
975 
976 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
977 
978 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
979 			nv_suspend(&(nvc->nvc_port[i]));
980 		}
981 
982 		nvc->nvc_state |= NV_CTRL_SUSPEND;
983 
984 		return (DDI_SUCCESS);
985 
986 	default:
987 		return (DDI_FAILURE);
988 	}
989 }
990 
991 
992 /*ARGSUSED*/
993 static int
994 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
995 {
996 	nv_ctl_t *nvc;
997 	int instance;
998 	dev_t dev;
999 
1000 	dev = (dev_t)arg;
1001 	instance = getminor(dev);
1002 
1003 	switch (infocmd) {
1004 	case DDI_INFO_DEVT2DEVINFO:
1005 		nvc = ddi_get_soft_state(nv_statep,  instance);
1006 		if (nvc != NULL) {
1007 			*result = nvc->nvc_dip;
1008 			return (DDI_SUCCESS);
1009 		} else {
1010 			*result = NULL;
1011 			return (DDI_FAILURE);
1012 		}
1013 	case DDI_INFO_DEVT2INSTANCE:
1014 		*(int *)result = instance;
1015 		break;
1016 	default:
1017 		break;
1018 	}
1019 	return (DDI_SUCCESS);
1020 }
1021 
1022 
1023 #ifdef SGPIO_SUPPORT
1024 /* ARGSUSED */
1025 static int
1026 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1027 {
1028 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1029 
1030 	if (nvc == NULL) {
1031 		return (ENXIO);
1032 	}
1033 
1034 	return (0);
1035 }
1036 
1037 
1038 /* ARGSUSED */
1039 static int
1040 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1041 {
1042 	return (0);
1043 }
1044 
1045 
1046 /* ARGSUSED */
1047 static int
1048 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1049 {
1050 	nv_ctl_t *nvc;
1051 	int inst;
1052 	int status;
1053 	int ctlr, port;
1054 	int drive;
1055 	uint8_t curr_led;
1056 	struct dc_led_ctl led;
1057 
1058 	inst = getminor(dev);
1059 	if (inst == -1) {
1060 		return (EBADF);
1061 	}
1062 
1063 	nvc = ddi_get_soft_state(nv_statep, inst);
1064 	if (nvc == NULL) {
1065 		return (EBADF);
1066 	}
1067 
1068 	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1069 		return (EIO);
1070 	}
1071 
1072 	switch (cmd) {
1073 	case DEVCTL_SET_LED:
1074 		status = ddi_copyin((void *)arg, &led,
1075 		    sizeof (struct dc_led_ctl), mode);
1076 		if (status != 0)
1077 			return (EFAULT);
1078 
1079 		/*
1080 		 * Since only the first two controllers currently support
1081 		 * SGPIO (as per NVIDIA docs), this code will as well.
1082 		 * Note that this validates the port value within led_state
1083 		 * as well.
1084 		 */
1085 
1086 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1087 		if ((ctlr != 0) && (ctlr != 1))
1088 			return (ENXIO);
1089 
1090 		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1091 		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1092 			return (EINVAL);
1093 		}
1094 
1095 		drive = led.led_number;
1096 
1097 		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1098 		    (led.led_state == DCL_STATE_OFF)) {
1099 
1100 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1101 				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1102 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1103 				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1104 			} else {
1105 				return (ENXIO);
1106 			}
1107 
1108 			port = SGP_DRV_TO_PORT(led.led_number);
1109 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1110 		}
1111 
1112 		if (led.led_ctl_active == DCL_CNTRL_ON) {
1113 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1114 				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1115 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1116 				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1117 			} else {
1118 				return (ENXIO);
1119 			}
1120 
1121 			port = SGP_DRV_TO_PORT(led.led_number);
1122 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1123 		}
1124 
1125 		break;
1126 
1127 	case DEVCTL_GET_LED:
1128 		status = ddi_copyin((void *)arg, &led,
1129 		    sizeof (struct dc_led_ctl), mode);
1130 		if (status != 0)
1131 			return (EFAULT);
1132 
1133 		/*
1134 		 * Since only the first two controllers currently support
1135 		 * SGPIO (as per NVIDIA docs), this code will as well.
1136 		 * Note that this validates the port value within led_state
1137 		 * as well.
1138 		 */
1139 
1140 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1141 		if ((ctlr != 0) && (ctlr != 1))
1142 			return (ENXIO);
1143 
1144 		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1145 		    led.led_number);
1146 
1147 		port = SGP_DRV_TO_PORT(led.led_number);
1148 		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1149 			led.led_ctl_active = DCL_CNTRL_ON;
1150 
1151 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1152 				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1153 					led.led_state = DCL_STATE_OFF;
1154 				else
1155 					led.led_state = DCL_STATE_ON;
1156 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1157 				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1158 					led.led_state = DCL_STATE_OFF;
1159 				else
1160 					led.led_state = DCL_STATE_ON;
1161 			} else {
1162 				return (ENXIO);
1163 			}
1164 		} else {
1165 			led.led_ctl_active = DCL_CNTRL_OFF;
1166 			/*
1167 			 * Not really off, but never set and no constant for
1168 			 * tri-state
1169 			 */
1170 			led.led_state = DCL_STATE_OFF;
1171 		}
1172 
1173 		status = ddi_copyout(&led, (void *)arg,
1174 		    sizeof (struct dc_led_ctl), mode);
1175 		if (status != 0)
1176 			return (EFAULT);
1177 
1178 		break;
1179 
1180 	case DEVCTL_NUM_LEDS:
1181 		led.led_number = SGPIO_DRV_CNT_VALUE;
1182 		led.led_ctl_active = 1;
1183 		led.led_type = 3;
1184 
1185 		/*
1186 		 * According to documentation, NVIDIA SGPIO is supposed to
1187 		 * support blinking, but it does not seem to work in practice.
1188 		 */
1189 		led.led_state = DCL_STATE_ON;
1190 
1191 		status = ddi_copyout(&led, (void *)arg,
1192 		    sizeof (struct dc_led_ctl), mode);
1193 		if (status != 0)
1194 			return (EFAULT);
1195 
1196 		break;
1197 
1198 	default:
1199 		return (EINVAL);
1200 	}
1201 
1202 	return (0);
1203 }
1204 #endif	/* SGPIO_SUPPORT */
1205 
1206 
1207 /*
1208  * Called by sata module to probe a port.  Port and device state
1209  * are not changed here... only reported back to the sata module.
1210  *
1211  */
1212 static int
1213 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1214 {
1215 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1216 	uint8_t cport = sd->satadev_addr.cport;
1217 	uint8_t pmport = sd->satadev_addr.pmport;
1218 	uint8_t qual = sd->satadev_addr.qual;
1219 	nv_port_t *nvp;
1220 
1221 	if (cport >= NV_MAX_PORTS(nvc)) {
1222 		sd->satadev_type = SATA_DTYPE_NONE;
1223 		sd->satadev_state = SATA_STATE_UNKNOWN;
1224 
1225 		return (SATA_FAILURE);
1226 	}
1227 
1228 	ASSERT(nvc->nvc_port != NULL);
1229 	nvp = &(nvc->nvc_port[cport]);
1230 	ASSERT(nvp != NULL);
1231 
1232 	NVLOG((NVDBG_RESET, nvc, nvp,
1233 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1234 	    "qual: 0x%x", cport, pmport, qual));
1235 
1236 	mutex_enter(&nvp->nvp_mutex);
1237 
1238 	/*
1239 	 * This check seems to be done in the SATA module.
1240 	 * It may not be required here
1241 	 */
1242 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1243 		nv_cmn_err(CE_WARN, nvc, nvp,
1244 		    "port inactive.  Use cfgadm to activate");
1245 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1246 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1247 		mutex_exit(&nvp->nvp_mutex);
1248 
1249 		return (SATA_SUCCESS);
1250 	}
1251 
1252 	if (nvp->nvp_state & NV_PORT_FAILED) {
1253 		NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
1254 		    "probe: port failed"));
1255 		sd->satadev_type = SATA_DTYPE_NONE;
1256 		sd->satadev_state = SATA_PSTATE_FAILED;
1257 		mutex_exit(&nvp->nvp_mutex);
1258 
1259 		return (SATA_SUCCESS);
1260 	}
1261 
1262 	if (qual == SATA_ADDR_PMPORT) {
1263 		sd->satadev_type = SATA_DTYPE_NONE;
1264 		sd->satadev_state = SATA_STATE_UNKNOWN;
1265 		mutex_exit(&nvp->nvp_mutex);
1266 		nv_cmn_err(CE_WARN, nvc, nvp,
1267 		    "controller does not support port multiplier");
1268 
1269 		return (SATA_SUCCESS);
1270 	}
1271 
1272 	sd->satadev_state = SATA_PSTATE_PWRON;
1273 
1274 	nv_copy_registers(nvp, sd, NULL);
1275 
1276 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
1277 		/*
1278 		 * We are waiting for reset to complete and to fetch
1279 		 * a signature.
1280 		 * Reset will cause the link to go down for a short period of
1281 		 * time.  If reset processing continues for less than
1282 		 * NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
1283 		 * we will not report intermittent link down.
1284 		 * Maybe we should report previous link state?
1285 		 */
1286 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) <
1287 		    NV_LINK_DOWN_TIMEOUT) {
1288 			SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1289 			    SSTATUS_IPM_ACTIVE);
1290 			SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1291 			    SSTATUS_DET_DEVPRE_PHYCOM);
1292 			sd->satadev_type = nvp->nvp_type;
1293 			mutex_exit(&nvp->nvp_mutex);
1294 
1295 			return (SATA_SUCCESS);
1296 		}
1297 	}
1298 	/*
1299 	 * Just report the current port state
1300 	 */
1301 	sd->satadev_type = nvp->nvp_type;
1302 	sd->satadev_state = nvp->nvp_state | SATA_PSTATE_PWRON;
1303 	mutex_exit(&nvp->nvp_mutex);
1304 
1305 #ifdef SGPIO_SUPPORT
1306 	if (nvp->nvp_type != SATA_DTYPE_NONE) {
1307 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1308 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1309 	}
1310 #endif
1311 
1312 	return (SATA_SUCCESS);
1313 }
1314 
1315 
1316 /*
1317  * Called by sata module to start a new command.
1318  */
1319 static int
1320 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1321 {
1322 	int cport = spkt->satapkt_device.satadev_addr.cport;
1323 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1324 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1325 	int ret;
1326 
1327 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1328 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1329 
1330 	mutex_enter(&nvp->nvp_mutex);
1331 
1332 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1333 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1334 		NVLOG((NVDBG_ERRS, nvc, nvp,
1335 		    "nv_sata_start: port not yet initialized"));
1336 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1337 		mutex_exit(&nvp->nvp_mutex);
1338 
1339 		return (SATA_TRAN_PORT_ERROR);
1340 	}
1341 
1342 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1343 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1344 		NVLOG((NVDBG_ERRS, nvc, nvp,
1345 		    "nv_sata_start: NV_PORT_INACTIVE"));
1346 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1347 		mutex_exit(&nvp->nvp_mutex);
1348 
1349 		return (SATA_TRAN_PORT_ERROR);
1350 	}
1351 
1352 	if (nvp->nvp_state & NV_PORT_FAILED) {
1353 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1354 		NVLOG((NVDBG_ERRS, nvc, nvp,
1355 		    "nv_sata_start: NV_PORT_FAILED state"));
1356 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1357 		mutex_exit(&nvp->nvp_mutex);
1358 
1359 		return (SATA_TRAN_PORT_ERROR);
1360 	}
1361 
1362 	if (nvp->nvp_state & NV_PORT_RESET) {
1363 		NVLOG((NVDBG_VERBOSE, nvc, nvp,
1364 		    "still waiting for reset completion"));
1365 		spkt->satapkt_reason = SATA_PKT_BUSY;
1366 		mutex_exit(&nvp->nvp_mutex);
1367 
1368 		/*
1369 		 * If in panic, timeouts do not occur, so fake one
1370 		 * so that the signature can be acquired to complete
1371 		 * the reset handling.
1372 		 */
1373 		if (ddi_in_panic()) {
1374 			nv_timeout(nvp);
1375 		}
1376 
1377 		return (SATA_TRAN_BUSY);
1378 	}
1379 
1380 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1381 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1382 		NVLOG((NVDBG_ERRS, nvc, nvp,
1383 		    "nv_sata_start: SATA_DTYPE_NONE"));
1384 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1385 		mutex_exit(&nvp->nvp_mutex);
1386 
1387 		return (SATA_TRAN_PORT_ERROR);
1388 	}
1389 
1390 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1391 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1392 		nv_cmn_err(CE_WARN, nvc, nvp,
1393 		    "port multipliers not supported by controller");
1394 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1395 		mutex_exit(&nvp->nvp_mutex);
1396 
1397 		return (SATA_TRAN_CMD_UNSUPPORTED);
1398 	}
1399 
1400 	/*
1401 	 * After a device reset, once sata module restore processing
1402 	 * is complete, the sata module will set sata_clear_dev_reset, which
1403 	 * indicates that restore processing has completed and normal
1404 	 * non-restore related commands should be processed.
1405 	 */
1406 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1407 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1408 		NVLOG((NVDBG_RESET, nvc, nvp,
1409 		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1410 	}
1411 
1412 	/*
1413 	 * If the device was recently reset as indicated by NV_PORT_RESTORE,
1414 	 * only allow commands which restore device state.  The sata module
1415 	 * marks such commands with sata_ignore_dev_reset.
1416 	 *
1417 	 * During coredump, nv_reset is called but then the restore
1418 	 * doesn't happen.  For now, work around this by ignoring the wait for
1419 	 * restore if the system is panicking.
1420 	 */
1421 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1422 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1423 	    (ddi_in_panic() == 0)) {
1424 		spkt->satapkt_reason = SATA_PKT_BUSY;
1425 		NVLOG((NVDBG_VERBOSE, nvc, nvp,
1426 		    "nv_sata_start: waiting for restore "));
1427 		mutex_exit(&nvp->nvp_mutex);
1428 
1429 		return (SATA_TRAN_BUSY);
1430 	}
1431 
1432 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1433 		spkt->satapkt_reason = SATA_PKT_BUSY;
1434 		NVLOG((NVDBG_ERRS, nvc, nvp,
1435 		    "nv_sata_start: NV_PORT_ABORTING"));
1436 		mutex_exit(&nvp->nvp_mutex);
1437 
1438 		return (SATA_TRAN_BUSY);
1439 	}
1440 
1441 	/* Clear SError to be able to check errors after the command failure */
1442 	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1443 
1444 	if (spkt->satapkt_op_mode &
1445 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1446 
1447 		ret = nv_start_sync(nvp, spkt);
1448 
1449 		mutex_exit(&nvp->nvp_mutex);
1450 
1451 		return (ret);
1452 	}
1453 
1454 	/*
1455 	 * start the command asynchronously
1456 	 */
1457 	ret = nv_start_async(nvp, spkt);
1458 
1459 	mutex_exit(&nvp->nvp_mutex);
1460 
1461 	return (ret);
1462 }
1463 
1464 
1465 /*
1466  * SATA_OPMODE_POLLING implies the driver is in a
1467  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1468  * If only SATA_OPMODE_SYNCH is set, the driver can use
1469  * interrupts and sleep wait on a cv.
1470  *
1471  * If SATA_OPMODE_POLLING is set, the driver can't use
1472  * interrupts and must busy wait and simulate the
1473  * interrupts by waiting for BSY to be cleared.
1474  *
1475  * Synchronous mode has to return BUSY if there are
1476  * any other commands already on the drive.
1477  */
1478 static int
1479 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1480 {
1481 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1482 	int ret;
1483 
1484 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1485 
1486 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1487 		spkt->satapkt_reason = SATA_PKT_BUSY;
1488 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1489 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1490 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1491 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1492 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1493 
1494 		return (SATA_TRAN_BUSY);
1495 	}
1496 
1497 	/*
1498 	 * If SYNC but not POLL, verify that this is not on an interrupt thread.
1499 	 */
1500 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1501 	    servicing_interrupt()) {
1502 		spkt->satapkt_reason = SATA_PKT_BUSY;
1503 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1504 		    "SYNC mode not allowed during interrupt"));
1505 
1506 		return (SATA_TRAN_BUSY);
1507 
1508 	}
1509 
1510 	/*
1511 	 * disable interrupt generation if in polled mode
1512 	 */
1513 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1514 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1515 	}
1516 
1517 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1518 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1519 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1520 		}
1521 
1522 		return (ret);
1523 	}
1524 
1525 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1526 		mutex_exit(&nvp->nvp_mutex);
1527 		ret = nv_poll_wait(nvp, spkt);
1528 		mutex_enter(&nvp->nvp_mutex);
1529 
1530 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1531 
1532 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1533 		    " done ret: %d", ret));
1534 
1535 		return (ret);
1536 	}
1537 
1538 	/*
1539 	 * non-polling synchronous mode handling.  The interrupt will signal
1540 	 * when the IO is completed.
1541 	 */
1542 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1543 
1544 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1545 
1546 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1547 	}
1548 
1549 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1550 	    " done reason %d", spkt->satapkt_reason));
1551 
1552 	return (SATA_TRAN_ACCEPTED);
1553 }
1554 
1555 
1556 static int
1557 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1558 {
1559 	int ret;
1560 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1561 #if ! defined(__lock_lint)
1562 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1563 #endif
1564 
1565 	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1566 
1567 	for (;;) {
1568 
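		/*
		 * Allow the ATA-mandated ~400ns settle time before sampling
		 * the status register.
		 */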
1569 		NV_DELAY_NSEC(400);
1570 
1571 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1572 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1573 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1574 			mutex_enter(&nvp->nvp_mutex);
1575 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1576 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1577 			nvp->nvp_state |= NV_PORT_RESET;
1578 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1579 			    NV_PORT_RESET_RETRY);
1580 			nv_reset(nvp);
1581 			nv_complete_io(nvp, spkt, 0);
1582 			mutex_exit(&nvp->nvp_mutex);
1583 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1584 			    "SATA_STATUS_BSY"));
1585 
1586 			return (SATA_TRAN_ACCEPTED);
1587 		}
1588 
1589 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1590 
1591 		/*
1592 		 * Simulate interrupt.
1593 		 */
1594 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1595 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1596 
1597 		if (ret != DDI_INTR_CLAIMED) {
1598 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1599 			    " unclaimed -- resetting"));
1600 			mutex_enter(&nvp->nvp_mutex);
1601 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1602 			nvp->nvp_state |= NV_PORT_RESET;
1603 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1604 			    NV_PORT_RESET_RETRY);
1605 			nv_reset(nvp);
1606 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1607 			nv_complete_io(nvp, spkt, 0);
1608 			mutex_exit(&nvp->nvp_mutex);
1609 
1610 			return (SATA_TRAN_ACCEPTED);
1611 		}
1612 
1613 #if ! defined(__lock_lint)
1614 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1615 			/*
1616 			 * packet is complete
1617 			 */
1618 			return (SATA_TRAN_ACCEPTED);
1619 		}
1620 #endif
1621 	}
1622 	/*NOTREACHED*/
1623 }
1624 
1625 
1626 /*
1627  * Called by sata module to abort outstanding packets.
1628  */
1629 /*ARGSUSED*/
1630 static int
1631 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1632 {
1633 	int cport = spkt->satapkt_device.satadev_addr.cport;
1634 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1635 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1636 	int c_a, ret;
1637 
1638 	ASSERT(cport < NV_MAX_PORTS(nvc));
1639 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1640 
1641 	mutex_enter(&nvp->nvp_mutex);
1642 
1643 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1644 		mutex_exit(&nvp->nvp_mutex);
1645 		nv_cmn_err(CE_WARN, nvc, nvp,
1646 		    "abort request failed: port inactive");
1647 
1648 		return (SATA_FAILURE);
1649 	}
1650 
1651 	/*
1652 	 * if spkt == NULL, abort all commands
1653 	 */
1654 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1655 
1656 	if (c_a) {
1657 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1658 		    "packets aborted running=%d", c_a));
1659 		ret = SATA_SUCCESS;
1660 	} else {
1661 		if (spkt == NULL) {
1662 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1663 		} else {
1664 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1665 			    "can't find spkt to abort"));
1666 		}
1667 		ret = SATA_FAILURE;
1668 	}
1669 
1670 	mutex_exit(&nvp->nvp_mutex);
1671 
1672 	return (ret);
1673 }
1674 
1675 
1676 /*
1677  * If spkt == NULL, abort all running pkts; otherwise
1678  * abort the requested packet.  Must be called with nvp_mutex
1679  * held and returns with it held.  Not NCQ aware.
1680  */
1681 static int
1682 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason, int flag)
1683 {
1684 	int aborted = 0, i, reset_once = B_FALSE;
1685 	struct nv_slot *nv_slotp;
1686 	sata_pkt_t *spkt_slot;
1687 
1688 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1689 
1690 	/*
1691 	 * return if the port is not configured
1692 	 */
1693 	if (nvp->nvp_slot == NULL) {
1694 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1695 		    "nv_abort_active: not configured so returning"));
1696 
1697 		return (0);
1698 	}
1699 
1700 	NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1701 
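	/*
	 * Flag the port as aborting so that nv_sata_start() rejects new
	 * commands with BUSY while the active slots are being torn down.
	 */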
1702 	nvp->nvp_state |= NV_PORT_ABORTING;
1703 
1704 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1705 
1706 		nv_slotp = &(nvp->nvp_slot[i]);
1707 		spkt_slot = nv_slotp->nvslot_spkt;
1708 
1709 		/*
1710 		 * skip if there is no active command in the slot
1711 		 */
1712 		if (spkt_slot == NULL) {
1713 			continue;
1714 		}
1715 
1716 		/*
1717 		 * if a specific packet was requested, skip if
1718 		 * this is not a match
1719 		 */
1720 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1721 			continue;
1722 		}
1723 
1724 		/*
1725 		 * stop the hardware.  This could need reworking
1726 		 * when NCQ is enabled in the driver.
1727 		 */
1728 		if (reset_once == B_FALSE) {
1729 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1730 
1731 			/*
1732 			 * stop DMA engine
1733 			 */
1734 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1735 
1736 			/*
1737 			 * Reset only if explicitly specified by the arg flag
1738 			 */
1739 			if (flag == B_TRUE) {
1740 				reset_once = B_TRUE;
1741 				nvp->nvp_state |= NV_PORT_RESET;
1742 				nvp->nvp_state &= ~(NV_PORT_RESTORE |
1743 				    NV_PORT_RESET_RETRY);
1744 				nv_reset(nvp);
1745 			}
1746 		}
1747 
1748 		spkt_slot->satapkt_reason = abort_reason;
1749 		nv_complete_io(nvp, spkt_slot, i);
1750 		aborted++;
1751 	}
1752 
1753 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1754 
1755 	return (aborted);
1756 }
1757 
1758 
1759 /*
1760  * Called by sata module to reset a port, device, or the controller.
1761  */
1762 static int
1763 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1764 {
1765 	int cport = sd->satadev_addr.cport;
1766 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1767 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1768 	int ret = SATA_SUCCESS;
1769 
1770 	ASSERT(cport < NV_MAX_PORTS(nvc));
1771 
1772 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1773 
1774 	mutex_enter(&nvp->nvp_mutex);
1775 
1776 	switch (sd->satadev_addr.qual) {
1777 
1778 	case SATA_ADDR_CPORT:
1779 		/*FALLTHROUGH*/
1780 	case SATA_ADDR_DCPORT:
1781 		nvp->nvp_state |= NV_PORT_RESET;
1782 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1783 		nv_reset(nvp);
1784 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1785 
1786 		break;
1787 	case SATA_ADDR_CNTRL:
1788 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1789 		    "nv_sata_reset: controller reset not supported"));
1790 
1791 		break;
1792 	case SATA_ADDR_PMPORT:
1793 	case SATA_ADDR_DPMPORT:
1794 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1795 		    "nv_sata_reset: port multipliers not supported"));
1796 		/*FALLTHROUGH*/
1797 	default:
1798 		/*
1799 		 * unsupported case
1800 		 */
1801 		ret = SATA_FAILURE;
1802 		break;
1803 	}
1804 
1805 	if (ret == SATA_SUCCESS) {
1806 		/*
1807 		 * If the port is inactive, do a quiet reset and don't attempt
1808 		 * to wait for reset completion or do any post reset processing
1809 		 */
1810 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1811 			nvp->nvp_state &= ~NV_PORT_RESET;
1812 			nvp->nvp_reset_time = 0;
1813 		}
1814 
1815 		/*
1816 		 * clear the port failed flag
1817 		 */
1818 		nvp->nvp_state &= ~NV_PORT_FAILED;
1819 	}
1820 
1821 	mutex_exit(&nvp->nvp_mutex);
1822 
1823 	return (ret);
1824 }
1825 
1826 
1827 /*
1828  * Sata entry point to handle port activation.  cfgadm -c connect
1829  */
1830 static int
1831 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1832 {
1833 	int cport = sd->satadev_addr.cport;
1834 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1835 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1836 
1837 	ASSERT(cport < NV_MAX_PORTS(nvc));
1838 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1839 
1840 	mutex_enter(&nvp->nvp_mutex);
1841 
1842 	sd->satadev_state = SATA_STATE_READY;
1843 
1844 	nv_copy_registers(nvp, sd, NULL);
1845 
1846 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1847 
1848 	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1849 	/* Initiate link probing and device signature acquisition */
1850 	nvp->nvp_type = SATA_DTYPE_NONE;
1851 	nvp->nvp_signature = 0;
1852 	nvp->nvp_state |= NV_PORT_RESET; /* | NV_PORT_PROBE; */
1853 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
1854 	nv_reset(nvp);
1855 
1856 	mutex_exit(&nvp->nvp_mutex);
1857 
1858 	return (SATA_SUCCESS);
1859 }
1860 
1861 
1862 /*
1863  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1864  */
1865 static int
1866 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1867 {
1868 	int cport = sd->satadev_addr.cport;
1869 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1870 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1871 
1872 	ASSERT(cport < NV_MAX_PORTS(nvc));
1873 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1874 
1875 	mutex_enter(&nvp->nvp_mutex);
1876 
1877 	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1878 
1879 	/*
1880 	 * make the device inaccessible
1881 	 */
1882 	nvp->nvp_state |= NV_PORT_INACTIVE;
1883 
1884 	/*
1885 	 * disable the interrupts on port
1886 	 */
1887 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1888 
1889 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1890 	nv_copy_registers(nvp, sd, NULL);
1891 
1892 	mutex_exit(&nvp->nvp_mutex);
1893 
1894 	return (SATA_SUCCESS);
1895 }
1896 
1897 
1898 /*
1899  * find an empty slot in the driver's queue, increment counters,
1900  * and then invoke the appropriate PIO or DMA start routine.
1901  */
1902 static int
1903 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1904 {
1905 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1906 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1907 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1908 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1909 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1910 	nv_slot_t *nv_slotp;
1911 	boolean_t dma_cmd;
1912 
1913 	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1914 	    sata_cmdp->satacmd_cmd_reg));
1915 
1916 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1917 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1918 		nvp->nvp_ncq_run++;
1919 		/*
		 * search for an empty NCQ slot.  by this time, it has
		 * already been determined by the caller that there is room
		 * on the queue.
1923 		 */
1924 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1925 		    on_bit <<= 1) {
1926 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1927 				break;
1928 			}
1929 		}
1930 
1931 		/*
		 * the first empty slot found should not exceed the queue
		 * depth of the drive.  if it does, it's an error.
1934 		 */
1935 		ASSERT(slot != nvp->nvp_queue_depth);
1936 
1937 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1938 		    nvp->nvp_sactive);
1939 		ASSERT((sactive & on_bit) == 0);
1940 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1941 		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1942 		    on_bit));
1943 		nvp->nvp_sactive_cache |= on_bit;
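
		/*
		 * For example, with nvp_sactive_cache previously 0x5 (slots
		 * 0 and 2 busy), the search above stops at slot 1 with
		 * on_bit == 0x2, the bit just set in SActive and the cache.
		 */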
1944 
1945 		ncq = NVSLOT_NCQ;
1946 
1947 	} else {
1948 		nvp->nvp_non_ncq_run++;
1949 		slot = 0;
1950 	}
1951 
1952 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1953 
1954 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1955 
1956 	nv_slotp->nvslot_spkt = spkt;
1957 	nv_slotp->nvslot_flags = ncq;
1958 
1959 	/*
1960 	 * the sata module doesn't indicate which commands utilize the
1961 	 * DMA engine, so find out using this switch table.
1962 	 */
1963 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1964 	case SATAC_READ_DMA_EXT:
1965 	case SATAC_WRITE_DMA_EXT:
1966 	case SATAC_WRITE_DMA:
1967 	case SATAC_READ_DMA:
1968 	case SATAC_READ_DMA_QUEUED:
1969 	case SATAC_READ_DMA_QUEUED_EXT:
1970 	case SATAC_WRITE_DMA_QUEUED:
1971 	case SATAC_WRITE_DMA_QUEUED_EXT:
1972 	case SATAC_READ_FPDMA_QUEUED:
1973 	case SATAC_WRITE_FPDMA_QUEUED:
1974 		dma_cmd = B_TRUE;
1975 		break;
1976 	default:
1977 		dma_cmd = B_FALSE;
1978 	}
1979 
1980 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1981 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1982 		nv_slotp->nvslot_start = nv_start_dma;
1983 		nv_slotp->nvslot_intr = nv_intr_dma;
1984 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1985 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1986 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1987 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1988 		if ((direction == SATA_DIR_READ) ||
1989 		    (direction == SATA_DIR_WRITE)) {
1990 			nv_slotp->nvslot_byte_count =
1991 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1992 			nv_slotp->nvslot_v_addr =
1993 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1994 			/*
1995 			 * Freeing DMA resources allocated by the framework
1996 			 * now to avoid buffer overwrite (dma sync) problems
1997 			 * when the buffer is released at command completion.
1998 			 * Primarily an issue on systems with more than
1999 			 * 4GB of memory.
2000 			 */
2001 			sata_free_dma_resources(spkt);
2002 		}
2003 	} else if (direction == SATA_DIR_NODATA_XFER) {
2004 		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2005 		nv_slotp->nvslot_start = nv_start_nodata;
2006 		nv_slotp->nvslot_intr = nv_intr_nodata;
2007 	} else if (direction == SATA_DIR_READ) {
2008 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2009 		nv_slotp->nvslot_start = nv_start_pio_in;
2010 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2011 		nv_slotp->nvslot_byte_count =
2012 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2013 		nv_slotp->nvslot_v_addr =
2014 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2015 		/*
2016 		 * Freeing DMA resources allocated by the framework now to
2017 		 * avoid buffer overwrite (dma sync) problems when the buffer
2018 		 * is released at command completion.  This is not an issue
2019 		 * for write because write does not update the buffer.
2020 		 * Primarily an issue on systems with more than 4GB of memory.
2021 		 */
2022 		sata_free_dma_resources(spkt);
2023 	} else if (direction == SATA_DIR_WRITE) {
2024 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2025 		nv_slotp->nvslot_start = nv_start_pio_out;
2026 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2027 		nv_slotp->nvslot_byte_count =
2028 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2029 		nv_slotp->nvslot_v_addr =
2030 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2031 	} else {
2032 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2033 		    " %d cookies %d cmd %x",
2034 		    sata_cmdp->satacmd_flags.sata_data_direction,
2035 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2036 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2037 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2038 
2039 		goto fail;
2040 	}
2041 
2042 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2043 	    SATA_TRAN_ACCEPTED) {
2044 #ifdef SGPIO_SUPPORT
2045 		nv_sgp_drive_active(nvp->nvp_ctlp,
2046 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2047 #endif
2048 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2049 
2050 		/*
2051 		 * start timer if it's not already running and this packet
2052 		 * is not requesting polled mode.
2053 		 */
2054 		if ((nvp->nvp_timeout_id == 0) &&
2055 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2056 			nv_setup_timeout(nvp, NV_ONE_SEC);
2057 		}
2058 
2059 		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2060 		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2061 
2062 		return (SATA_TRAN_ACCEPTED);
2063 	}
2064 
2065 	fail:
2066 
2067 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2068 
2069 	if (ncq == NVSLOT_NCQ) {
2070 		nvp->nvp_ncq_run--;
2071 		nvp->nvp_sactive_cache &= ~on_bit;
2072 	} else {
2073 		nvp->nvp_non_ncq_run--;
2074 	}
2075 	nv_slotp->nvslot_spkt = NULL;
2076 	nv_slotp->nvslot_flags = 0;
2077 
2078 	return (ret);
2079 }
2080 
2081 
2082 /*
2083  * Check if the signature is ready and if non-zero translate
2084  * it into a solaris sata defined type.
2085  */
2086 static void
2087 nv_read_signature(nv_port_t *nvp)
2088 {
2089 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2090 
2091 	/*
	 * Task file error register bit 0 set to 1 indicates that the drive
	 * is ready and has sent a D2H FIS with a signature.
2094 	 */
2095 	if (nv_check_tfr_error != 0) {
2096 		uint8_t tfr_error = nv_get8(cmdhdl, nvp->nvp_error);
2097 		if (!(tfr_error & SATA_ERROR_ILI)) {
2098 			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
2099 			    "nv_read_signature: signature not ready"));
2100 			return;
2101 		}
2102 	}
2103 
2104 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2105 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2106 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2107 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
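
	/*
	 * The signature just assembled has count in bits 0-7, sect in 8-15,
	 * lcyl in 16-23 and hcyl in 24-31.  The NV_SIG_* values matched
	 * below are assumed to correspond to the standard SATA signatures
	 * (0x00000101 for a disk, 0xEB140101 for ATAPI, 0x96690101 for a
	 * port multiplier).
	 */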
2108 
2109 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
2110 	    "nv_read_signature: 0x%x ", nvp->nvp_signature));
2111 
2112 	switch (nvp->nvp_signature) {
2113 
2114 	case NV_SIG_DISK:
2115 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2116 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2117 		break;
2118 	case NV_SIG_ATAPI:
2119 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2120 		    "drive is an optical device"));
2121 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2122 		break;
2123 	case NV_SIG_PM:
2124 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2125 		    "device is a port multiplier"));
2126 		nvp->nvp_type = SATA_DTYPE_PMULT;
2127 		break;
2128 	case NV_SIG_NOTREADY:
2129 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2130 		    "signature not ready"));
2131 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2132 		break;
2133 	default:
2134 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2135 		    " recognized", nvp->nvp_signature);
2136 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2137 		break;
2138 	}
2139 
2140 	if (nvp->nvp_signature) {
2141 		nvp->nvp_state &= ~(NV_PORT_RESET_RETRY | NV_PORT_RESET);
2142 	}
2143 }
2144 
2145 
2146 /*
2147  * Set up a new timeout or complete a timeout.
 * The timeout value must be specified in microseconds and must be non-zero.
2150  * Must be called at the end of the timeout routine.
2151  */
2152 static void
2153 nv_setup_timeout(nv_port_t *nvp, int time)
2154 {
2155 	clock_t old_duration = nvp->nvp_timeout_duration;
2156 
2157 	ASSERT(time != 0);
2158 
2159 	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2160 		/*
2161 		 * Since we are dropping the mutex for untimeout,
2162 		 * the timeout may be executed while we are trying to
2163 		 * untimeout and setting up a new timeout.
2164 		 * If nvp_timeout_duration is 0, then this function
2165 		 * was re-entered. Just exit.
2166 		 */
2167 	cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2168 		return;
2169 	}
2170 	nvp->nvp_timeout_duration = 0;
2171 	if (nvp->nvp_timeout_id == 0) {
2172 		/* Start new timer */
2173 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2174 		    drv_usectohz(time));
2175 	} else {
2176 		/*
2177 		 * If the currently running timeout is due later than the
2178 		 * requested one, restart it with a new expiration.
2179 		 * Our timeouts do not need to be accurate - we would be just
2180 		 * checking that the specified time was exceeded.
2181 		 */
2182 		if (old_duration > time) {
2183 			mutex_exit(&nvp->nvp_mutex);
2184 			untimeout(nvp->nvp_timeout_id);
2185 			mutex_enter(&nvp->nvp_mutex);
2186 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2187 			    drv_usectohz(time));
2188 		}
2189 	}
2190 	nvp->nvp_timeout_duration = time;
2191 }
2192 
2193 
2194 
2195 int nv_reset_length = NV_RESET_LENGTH;
2196 
2197 /*
2198  * Reset the port
2199  *
2200  * Entered with nvp mutex held
2201  */
2202 static void
2203 nv_reset(nv_port_t *nvp)
2204 {
2205 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2206 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2207 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2208 	uint32_t sctrl, serr, sstatus;
2209 	uint8_t bmicx;
2210 	int i, j, reset = 0;
2211 
2212 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2213 
2214 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset()"));
2215 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2216 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset: serr 0x%x", serr));
2217 
2218 	/*
2219 	 * stop DMA engine.
2220 	 */
2221 	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2222 	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2223 
2224 	nvp->nvp_state |= NV_PORT_RESET;
2225 	nvp->nvp_reset_time = ddi_get_lbolt();
2226 
2227 	/*
2228 	 * Issue hardware reset; retry if necessary.
2229 	 */
2230 	for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
2231 		/*
2232 		 * Clear signature registers
2233 		 */
2234 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2235 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2236 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2237 		nv_put8(cmdhdl, nvp->nvp_count, 0);
2238 
2239 		/* Clear task file error register */
2240 		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2241 
2242 		/*
2243 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
2244 		 */
2245 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2246 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2247 		    sctrl | SCONTROL_DET_COMRESET);
2248 
2249 		/* Wait at least 1ms, as required by the spec */
2250 		drv_usecwait(nv_reset_length);
2251 
2252 		/* Reset all accumulated error bits */
2253 		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2254 
2255 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2256 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2257 		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2258 		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus));
2259 
2260 		/* de-assert reset in PHY */
2261 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2262 		    sctrl & ~SCONTROL_DET_COMRESET);
2263 
2264 		/*
2265 		 * Wait up to 10ms for COMINIT to arrive, indicating that
2266 		 * the device recognized COMRESET.
2267 		 */
2268 		for (j = 0; j < 10; j++) {
2269 			drv_usecwait(NV_ONE_MSEC);
2270 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2271 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2272 			    (SSTATUS_GET_DET(sstatus) ==
2273 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2274 				reset = 1;
2275 				break;
2276 			}
2277 		}
2278 		if (reset == 1)
2279 			break;
2280 	}
2281 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2282 	if (reset == 0) {
2283 		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2284 		    "(serr 0x%x) after %d attempts", serr, i));
2285 	} else {
2286 		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset succeeded (serr 0x%x)"
2287 		    "after %dms", serr, TICK_TO_MSEC(ddi_get_lbolt() -
2288 		    nvp->nvp_reset_time)));
2289 	}
2290 	nvp->nvp_reset_time = ddi_get_lbolt();
2291 
2292 	if (servicing_interrupt()) {
2293 		nv_setup_timeout(nvp, NV_ONE_MSEC);
2294 	} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
2295 		nv_monitor_reset(nvp);
2296 	}
2297 }
2298 
2299 
2300 /*
2301  * Initialize register handling specific to mcp51/mcp55
2302  */
2303 /* ARGSUSED */
2304 static void
2305 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2306 {
2307 	nv_port_t *nvp;
2308 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2309 	uint8_t off, port;
2310 
2311 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2312 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2313 
2314 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2315 		nvp = &(nvc->nvc_port[port]);
2316 		nvp->nvp_mcp5x_int_status =
2317 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2318 		nvp->nvp_mcp5x_int_ctl =
2319 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2320 
2321 		/*
2322 		 * clear any previous interrupts asserted
2323 		 */
2324 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2325 		    MCP5X_INT_CLEAR);
2326 
2327 		/*
2328 		 * These are the interrupts to accept for now.  The spec
2329 		 * says these are enable bits, but nvidia has indicated
2330 		 * these are masking bits.  Even though they may be masked
2331 		 * out to prevent asserting the main interrupt, they can
2332 		 * still be asserted while reading the interrupt status
2333 		 * register, so that needs to be considered in the interrupt
2334 		 * handler.
2335 		 */
2336 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2337 		    ~(MCP5X_INT_IGNORE));
2338 	}
2339 
2340 	/*
2341 	 * Allow the driver to program the BM on the first command instead
2342 	 * of waiting for an interrupt.
2343 	 */
#ifdef NCQ
	{
		uint32_t flags;

		flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD |
		    MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
		flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
	}
#endif
2350 
2351 	/*
2352 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2353 	 * Enable DMA to take advantage of that.
2354 	 *
2355 	 */
2356 	if (nvc->nvc_revid >= 0xa3) {
2357 		if (nv_sata_40bit_dma == B_TRUE) {
2358 			uint32_t reg32;
2359 			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2360 			    "rev id is %X and"
2361 			    " is capable of 40-bit DMA addressing",
2362 			    nvc->nvc_revid));
2363 			nvc->dma_40bit = B_TRUE;
2364 			reg32 = pci_config_get32(pci_conf_handle,
2365 			    NV_SATA_CFG_20);
2366 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2367 			    reg32 | NV_40BIT_PRD);
2368 		} else {
2369 			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2370 			    "40-bit DMA disabled by nv_sata_40bit_dma"));
2371 		}
2372 	} else {
2373 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2374 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2375 	}
2376 }
2377 
2378 
2379 /*
2380  * Initialize register handling specific to ck804
2381  */
2382 static void
2383 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2384 {
2385 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2386 	uint32_t reg32;
2387 	uint16_t reg16;
2388 	nv_port_t *nvp;
2389 	int j;
2390 
2391 	/*
2392 	 * delay hotplug interrupts until PHYRDY.
2393 	 */
2394 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2395 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2396 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2397 
2398 	/*
2399 	 * enable hot plug interrupts for channel x and y
2400 	 */
2401 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2402 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2403 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2404 	    NV_HIRQ_EN | reg16);
2405 
2406 
2407 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2408 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2409 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2410 	    NV_HIRQ_EN | reg16);
2411 
2412 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2413 
2414 	/*
2415 	 * clear any existing interrupt pending then enable
2416 	 */
2417 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2418 		nvp = &(nvc->nvc_port[j]);
2419 		mutex_enter(&nvp->nvp_mutex);
2420 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2421 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2422 		mutex_exit(&nvp->nvp_mutex);
2423 	}
2424 }
2425 
2426 
2427 /*
2428  * Initialize the controller and set up driver data structures.
 * Determine whether the controller is ck804 or mcp5x class.
2430  */
2431 static int
2432 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2433 {
2434 	struct sata_hba_tran stran;
2435 	nv_port_t *nvp;
2436 	int j, ck804;
2437 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2438 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2439 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2440 	uint32_t reg32;
2441 	uint8_t reg8, reg8_save;
2442 
2443 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2444 
2445 	ck804 = B_TRUE;
2446 #ifdef SGPIO_SUPPORT
2447 	nvc->nvc_mcp5x_flag = B_FALSE;
2448 #endif
2449 
2450 	/*
2451 	 * Need to set bit 2 to 1 at config offset 0x50
2452 	 * to enable access to the bar5 registers.
2453 	 */
2454 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2455 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2456 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2457 		    reg32 | NV_BAR5_SPACE_EN);
2458 	}
2459 
2460 	/*
	 * Determine if this is ck804 or mcp5x.  ck804 maps the task file
	 * registers into bar5 while mcp5x does not, leaving that offset in
	 * mcp5x's register space unimplemented.  So check whether one of
	 * the task file registers is writable and reads back what was
	 * written.  On mcp5x the value read back will not match what was
	 * written (typically 0xff), whereas ck804 returns the value
	 * written.
2467 	 */
2468 	reg8_save = nv_get8(bar5_hdl,
2469 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2470 
2471 
2472 	for (j = 1; j < 3; j++) {
2473 
2474 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2475 		reg8 = nv_get8(bar5_hdl,
2476 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2477 
2478 		if (reg8 != j) {
2479 			ck804 = B_FALSE;
2480 			nvc->nvc_mcp5x_flag = B_TRUE;
2481 			break;
2482 		}
2483 	}
2484 
2485 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2486 
2487 	if (ck804 == B_TRUE) {
2488 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2489 		nvc->nvc_interrupt = ck804_intr;
2490 		nvc->nvc_reg_init = ck804_reg_init;
2491 		nvc->nvc_set_intr = ck804_set_intr;
2492 	} else {
2493 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2494 		nvc->nvc_interrupt = mcp5x_intr;
2495 		nvc->nvc_reg_init = mcp5x_reg_init;
2496 		nvc->nvc_set_intr = mcp5x_set_intr;
2497 	}
2498 
2499 
2500 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2501 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2502 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2503 	stran.sata_tran_hba_features_support =
2504 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2505 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2506 	stran.sata_tran_probe_port = nv_sata_probe;
2507 	stran.sata_tran_start = nv_sata_start;
2508 	stran.sata_tran_abort = nv_sata_abort;
2509 	stran.sata_tran_reset_dport = nv_sata_reset;
2510 	stran.sata_tran_selftest = NULL;
2511 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2512 	stran.sata_tran_pwrmgt_ops = NULL;
2513 	stran.sata_tran_ioctl = NULL;
2514 	nvc->nvc_sata_hba_tran = stran;
2515 
2516 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2517 	    KM_SLEEP);
2518 
2519 	/*
2520 	 * initialize registers common to all chipsets
2521 	 */
2522 	nv_common_reg_init(nvc);
2523 
2524 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2525 		nvp = &(nvc->nvc_port[j]);
2526 
2527 		cmd_addr = nvp->nvp_cmd_addr;
2528 		ctl_addr = nvp->nvp_ctl_addr;
2529 		bm_addr = nvp->nvp_bm_addr;
2530 
2531 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2532 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2533 
2534 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2535 
2536 		nvp->nvp_data	= cmd_addr + NV_DATA;
2537 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2538 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2539 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2540 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2541 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2542 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2543 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2544 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2545 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2546 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2547 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2548 
2549 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2550 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2551 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2552 
2553 		nvp->nvp_state = 0;
2554 
2555 		/*
2556 		 * Initialize dma handles, etc.
2557 		 * If it fails, the port is in inactive state.
2558 		 */
2559 		(void) nv_init_port(nvp);
2560 	}
2561 
2562 	/*
2563 	 * initialize register by calling chip specific reg initialization
2564 	 */
2565 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2566 
2567 	/* initialize the hba dma attribute */
2568 	if (nvc->dma_40bit == B_TRUE)
2569 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2570 		    &buffer_dma_40bit_attr;
2571 	else
2572 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2573 		    &buffer_dma_attr;
2574 
2575 	return (NV_SUCCESS);
2576 }
2577 
2578 
2579 /*
2580  * Initialize data structures with enough slots to handle queuing, if
2581  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2582  * NCQ support is built into the driver and enabled.  It might have been
2583  * better to derive the true size from the drive itself, but the sata
2584  * module only sends down that information on the first NCQ command,
2585  * which means possibly re-sizing the structures on an interrupt stack,
2586  * making error handling more messy.  The easy way is to just allocate
2587  * all 32 slots, which is what most drives support anyway.
2588  */
2589 static int
2590 nv_init_port(nv_port_t *nvp)
2591 {
2592 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2593 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2594 	dev_info_t *dip = nvc->nvc_dip;
2595 	ddi_device_acc_attr_t dev_attr;
2596 	size_t buf_size;
2597 	ddi_dma_cookie_t cookie;
2598 	uint_t count;
2599 	int rc, i;
2600 
2601 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2602 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2603 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2604 
2605 	if (nvp->nvp_state & NV_PORT_INIT) {
2606 		NVLOG((NVDBG_INIT, nvc, nvp,
2607 		    "nv_init_port previously initialized"));
2608 
2609 		return (NV_SUCCESS);
2610 	} else {
2611 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2612 	}
2613 
2614 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2615 	    NV_QUEUE_SLOTS, KM_SLEEP);
2616 
2617 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2618 	    NV_QUEUE_SLOTS, KM_SLEEP);
2619 
2620 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2621 	    NV_QUEUE_SLOTS, KM_SLEEP);
2622 
2623 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2624 	    NV_QUEUE_SLOTS, KM_SLEEP);
2625 
2626 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2627 	    KM_SLEEP);
2628 
2629 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2630 
2631 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2632 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2633 
2634 		if (rc != DDI_SUCCESS) {
2635 			nv_uninit_port(nvp);
2636 
2637 			return (NV_FAILURE);
2638 		}
2639 
2640 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2641 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2642 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2643 		    &(nvp->nvp_sg_acc_hdl[i]));
2644 
2645 		if (rc != DDI_SUCCESS) {
2646 			nv_uninit_port(nvp);
2647 
2648 			return (NV_FAILURE);
2649 		}
2650 
2651 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2652 		    nvp->nvp_sg_addr[i], buf_size,
2653 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2654 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2655 
2656 		if (rc != DDI_DMA_MAPPED) {
2657 			nv_uninit_port(nvp);
2658 
2659 			return (NV_FAILURE);
2660 		}
2661 
2662 		ASSERT(count == 1);
2663 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2664 
2665 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
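
		/*
		 * The PRD table must reside below 4GB of physical memory:
		 * its address is cached in the uint32_t nvp_sg_paddr array
		 * and is presumably what gets programmed into the 32-bit
		 * bus master descriptor table pointer (BMIDTPX) register
		 * when a command is started.
		 */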
2666 
2667 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2668 	}
2669 
2670 	/*
2671 	 * nvp_queue_depth represents the actual drive queue depth, not the
2672 	 * number of slots allocated in the structures (which may be more).
2673 	 * Actual queue depth is only learned after the first NCQ command, so
2674 	 * initialize it to 1 for now.
2675 	 */
2676 	nvp->nvp_queue_depth = 1;
2677 
2678 	/*
2679 	 * Port is initialized whether the device is attached or not.
2680 	 * Link processing and device identification will be started later,
2681 	 * after interrupts are initialized.
2682 	 */
2683 	nvp->nvp_type = SATA_DTYPE_NONE;
2684 	nvp->nvp_signature = 0;
2685 
2686 	nvp->nvp_state |= NV_PORT_INIT;
2687 
2688 	return (NV_SUCCESS);
2689 }
2690 
2691 
2692 /*
2693  * Establish initial link & device type
2694  * Called only from nv_attach
2695  * Loops up to approximately 210ms; can exit earlier.
 * The time includes waiting for the link to come up and completion of the
 * initial signature gathering operation.
2698  */
2699 static void
2700 nv_init_port_link_processing(nv_ctl_t *nvc)
2701 {
2702 	ddi_acc_handle_t bar5_hdl;
2703 	nv_port_t *nvp;
2704 	volatile uint32_t sstatus;
2705 	int port, links_up, ready_ports, i;
2706 
2707 
2708 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2709 		nvp = &(nvc->nvc_port[port]);
2710 		if (nvp != NULL && (nvp->nvp_state & NV_PORT_INIT)) {
2711 			/*
2712 			 * Initiate device identification, if any is attached
2713 			 * and reset was not already applied by hot-plug
2714 			 * event processing.
2715 			 */
2716 			mutex_enter(&nvp->nvp_mutex);
2717 			if (!(nvp->nvp_state & NV_PORT_RESET)) {
2718 				nvp->nvp_state |= NV_PORT_RESET | NV_PORT_PROBE;
2719 				nv_reset(nvp);
2720 			}
2721 			mutex_exit(&nvp->nvp_mutex);
2722 		}
2723 	}
2724 	/*
2725 	 * Wait up to 10ms for links up.
2726 	 * Spec says that link should be up in 1ms.
2727 	 */
2728 	for (i = 0; i < 10; i++) {
2729 		drv_usecwait(NV_ONE_MSEC);
2730 		links_up = 0;
2731 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2732 			nvp = &(nvc->nvc_port[port]);
2733 			mutex_enter(&nvp->nvp_mutex);
2734 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2735 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2736 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2737 			    (SSTATUS_GET_DET(sstatus) ==
2738 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2739 				if ((nvp->nvp_state & NV_PORT_RESET) &&
2740 				    nvp->nvp_type == SATA_DTYPE_NONE) {
2741 					nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2742 				}
2743 				NVLOG((NVDBG_INIT, nvc, nvp,
2744 				    "nv_init_port_link_processing()"
2745 				    "link up; time from reset %dms",
2746 				    TICK_TO_MSEC(ddi_get_lbolt() -
2747 				    nvp->nvp_reset_time)));
2748 				links_up++;
2749 			}
2750 			mutex_exit(&nvp->nvp_mutex);
2751 		}
2752 		if (links_up == NV_MAX_PORTS(nvc)) {
2753 			break;
2754 		}
2755 	}
2756 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2757 	    "%d links up", links_up));
2758 	/*
2759 	 * At this point, if any device is attached, the link is established.
2760 	 * Wait till devices are ready to be accessed, no more than 200ms.
2761 	 * 200ms is empirical time in which a signature should be available.
2762 	 */
2763 	for (i = 0; i < 200; i++) {
2764 		ready_ports = 0;
2765 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2766 			nvp = &(nvc->nvc_port[port]);
2767 			mutex_enter(&nvp->nvp_mutex);
2768 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2769 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2770 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2771 			    (SSTATUS_GET_DET(sstatus) ==
2772 			    SSTATUS_DET_DEVPRE_PHYCOM) &&
2773 			    !(nvp->nvp_state & (NV_PORT_RESET |
2774 			    NV_PORT_RESET_RETRY))) {
2775 				/*
2776 				 * Reset already processed
2777 				 */
2778 				NVLOG((NVDBG_RESET, nvc, nvp,
2779 				    "nv_init_port_link_processing()"
2780 				    "device ready; port state %x; "
2781 				    "time from reset %dms", nvp->nvp_state,
2782 				    TICK_TO_MSEC(ddi_get_lbolt() -
2783 				    nvp->nvp_reset_time)));
2784 
2785 				ready_ports++;
2786 			}
2787 			mutex_exit(&nvp->nvp_mutex);
2788 		}
2789 		if (ready_ports == links_up) {
2790 			break;
2791 		}
2792 		drv_usecwait(NV_ONE_MSEC);
2793 	}
2794 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2795 	    "%d devices ready", ready_ports));
2796 }
2797 
2798 /*
2799  * Free dynamically allocated structures for port.
2800  */
2801 static void
2802 nv_uninit_port(nv_port_t *nvp)
2803 {
2804 	int i;
2805 
2806 	/*
2807 	 * It is possible to reach here before a port has been initialized or
2808 	 * after it has already been uninitialized.  Just return in that case.
2809 	 */
2810 	if (nvp->nvp_slot == NULL) {
2811 
2812 		return;
2813 	}
2814 	/*
2815 	 * Mark port unusable now.
2816 	 */
2817 	nvp->nvp_state &= ~NV_PORT_INIT;
2818 
2819 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2820 	    "nv_uninit_port uninitializing"));
2821 
2822 	nvp->nvp_type = SATA_DTYPE_NONE;
2823 
2824 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2825 		if (nvp->nvp_sg_paddr[i]) {
2826 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2827 		}
2828 
2829 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2830 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2831 		}
2832 
2833 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2834 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2835 		}
2836 	}
2837 
2838 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2839 	nvp->nvp_slot = NULL;
2840 
2841 	kmem_free(nvp->nvp_sg_dma_hdl,
2842 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2843 	nvp->nvp_sg_dma_hdl = NULL;
2844 
2845 	kmem_free(nvp->nvp_sg_acc_hdl,
2846 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2847 	nvp->nvp_sg_acc_hdl = NULL;
2848 
2849 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2850 	nvp->nvp_sg_addr = NULL;
2851 
2852 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2853 	nvp->nvp_sg_paddr = NULL;
2854 }
2855 
2856 
2857 /*
2858  * Cache register offsets and access handles to frequently accessed registers
2859  * which are common to either chipset.
2860  */
2861 static void
2862 nv_common_reg_init(nv_ctl_t *nvc)
2863 {
2864 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2865 	uchar_t *bm_addr_offset, *sreg_offset;
2866 	uint8_t bar, port;
2867 	nv_port_t *nvp;
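
	/*
	 * Port 0 uses the command/control blocks behind BAR 0/1 and port 1
	 * those behind BAR 2/3, mirroring the legacy IDE channel layout;
	 * the bus master registers for the second channel start 8 bytes
	 * into BAR 4, as in the standard PCI IDE bus master programming
	 * model.
	 */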
2868 
2869 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2870 		if (port == 0) {
2871 			bar = NV_BAR_0;
2872 			bm_addr_offset = 0;
2873 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2874 		} else {
2875 			bar = NV_BAR_2;
2876 			bm_addr_offset = (uchar_t *)8;
2877 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2878 		}
2879 
2880 		nvp = &(nvc->nvc_port[port]);
2881 		nvp->nvp_ctlp = nvc;
2882 		nvp->nvp_port_num = port;
2883 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2884 
2885 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2886 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2887 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2888 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2889 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2890 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2891 		    (long)bm_addr_offset;
2892 
2893 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2894 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2895 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2896 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2897 	}
2898 }
2899 
2900 
2901 static void
2902 nv_uninit_ctl(nv_ctl_t *nvc)
2903 {
2904 	int port;
2905 	nv_port_t *nvp;
2906 
2907 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2908 
2909 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2910 		nvp = &(nvc->nvc_port[port]);
2911 		mutex_enter(&nvp->nvp_mutex);
2912 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2913 		nv_uninit_port(nvp);
2914 		mutex_exit(&nvp->nvp_mutex);
2915 		mutex_destroy(&nvp->nvp_mutex);
2916 		cv_destroy(&nvp->nvp_poll_cv);
2917 	}
2918 
2919 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2920 	nvc->nvc_port = NULL;
2921 }
2922 
2923 
2924 /*
2925  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2926  * that interrupts from other devices can be disregarded while dtracing.
2927  */
2928 /* ARGSUSED */
2929 static uint_t
2930 ck804_intr(caddr_t arg1, caddr_t arg2)
2931 {
2932 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2933 	uint8_t intr_status;
2934 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2935 
2936 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
2937 		return (DDI_INTR_UNCLAIMED);
2938 
2939 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2940 
2941 	if (intr_status == 0) {
2942 
2943 		return (DDI_INTR_UNCLAIMED);
2944 	}
2945 
2946 	ck804_intr_process(nvc, intr_status);
2947 
2948 	return (DDI_INTR_CLAIMED);
2949 }
2950 
2951 
2952 /*
 * Main interrupt handler for ck804.  Handles normal device
2954  * interrupts as well as port hot plug and remove interrupts.
2955  *
2956  */
2957 static void
2958 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2959 {
2960 
2961 	int port, i;
2962 	nv_port_t *nvp;
2963 	nv_slot_t *nv_slotp;
2964 	uchar_t	status;
2965 	sata_pkt_t *spkt;
2966 	uint8_t bmstatus, clear_bits;
2967 	ddi_acc_handle_t bmhdl;
2968 	int nvcleared = 0;
2969 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2970 	uint32_t sstatus;
2971 	int port_mask_hot[] = {
2972 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
2973 	};
2974 	int port_mask_pm[] = {
2975 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
2976 	};
2977 
2978 	NVLOG((NVDBG_INTR, nvc, NULL,
2979 	    "ck804_intr_process entered intr_status=%x", intr_status));
2980 
2981 	/*
	 * For command completion interrupts, an explicit clear is not
	 * required; however, for error cases an explicit clear is performed.
2984 	 */
2985 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2986 
2987 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
2988 
2989 		if ((port_mask[port] & intr_status) == 0) {
2990 			continue;
2991 		}
2992 
2993 		NVLOG((NVDBG_INTR, nvc, NULL,
2994 		    "ck804_intr_process interrupt on port %d", port));
2995 
2996 		nvp = &(nvc->nvc_port[port]);
2997 
2998 		mutex_enter(&nvp->nvp_mutex);
2999 
3000 		/*
3001 		 * there was a corner case found where an interrupt
		 * arrived before nvp_slot was set.  Should probably
		 * track down why that happens and try
3004 		 * to eliminate that source and then get rid of this
3005 		 * check.
3006 		 */
3007 		if (nvp->nvp_slot == NULL) {
3008 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3009 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3010 			    "received before initialization "
3011 			    "completed status=%x", status));
3012 			mutex_exit(&nvp->nvp_mutex);
3013 
3014 			/*
3015 			 * clear interrupt bits
3016 			 */
3017 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3018 			    port_mask[port]);
3019 
3020 			continue;
3021 		}
3022 
3023 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3024 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3025 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3026 			    " no command in progress status=%x", status));
3027 			mutex_exit(&nvp->nvp_mutex);
3028 
3029 			/*
3030 			 * clear interrupt bits
3031 			 */
3032 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3033 			    port_mask[port]);
3034 
3035 			continue;
3036 		}
3037 
3038 		bmhdl = nvp->nvp_bm_hdl;
3039 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3040 
3041 		if (!(bmstatus & BMISX_IDEINTS)) {
3042 			mutex_exit(&nvp->nvp_mutex);
3043 
3044 			continue;
3045 		}
3046 
3047 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3048 
3049 		if (status & SATA_STATUS_BSY) {
3050 			mutex_exit(&nvp->nvp_mutex);
3051 
3052 			continue;
3053 		}
3054 
3055 		nv_slotp = &(nvp->nvp_slot[0]);
3056 
3057 		ASSERT(nv_slotp);
3058 
3059 		spkt = nv_slotp->nvslot_spkt;
3060 
3061 		if (spkt == NULL) {
3062 			mutex_exit(&nvp->nvp_mutex);
3063 
3064 			continue;
3065 		}
3066 
3067 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3068 
3069 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3070 
3071 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3072 
3073 			nv_complete_io(nvp, spkt, 0);
3074 		}
3075 
3076 		mutex_exit(&nvp->nvp_mutex);
3077 	}
3078 
3079 	/*
3080 	 * ck804 often doesn't correctly distinguish hot add/remove
3081 	 * interrupts.  Frequently both the ADD and the REMOVE bits
3082 	 * are asserted, whether it was a remove or add.  Use sstatus
3083 	 * to distinguish hot add from hot remove.
3084 	 */
3085 
3086 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3087 		clear_bits = 0;
3088 
3089 		nvp = &(nvc->nvc_port[port]);
3090 		mutex_enter(&nvp->nvp_mutex);
3091 
3092 		if ((port_mask_pm[port] & intr_status) != 0) {
3093 			clear_bits = port_mask_pm[port];
3094 			NVLOG((NVDBG_HOT, nvc, nvp,
3095 			    "clearing PM interrupt bit: %x",
3096 			    intr_status & port_mask_pm[port]));
3097 		}
3098 
3099 		if ((port_mask_hot[port] & intr_status) == 0) {
3100 			if (clear_bits != 0) {
3101 				goto clear;
3102 			} else {
3103 				mutex_exit(&nvp->nvp_mutex);
3104 				continue;
3105 			}
3106 		}
3107 
3108 		/*
3109 		 * reaching here means there was a hot add or remove.
3110 		 */
3111 		clear_bits |= port_mask_hot[port];
3112 
3113 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3114 
3115 		sstatus = nv_get32(bar5_hdl,
3116 		    nvc->nvc_port[port].nvp_sstatus);
3117 
3118 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3119 		    SSTATUS_DET_DEVPRE_PHYCOM) {
3120 			nv_report_add_remove(nvp, 0);
3121 		} else {
3122 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3123 		}
3124 	clear:
3125 		/*
3126 		 * clear interrupt bits.  explicit interrupt clear is
3127 		 * required for hotplug interrupts.
3128 		 */
3129 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3130 
3131 		/*
3132 		 * make sure it's flushed and cleared.  If not try
3133 		 * again.  Sometimes it has been observed to not clear
3134 		 * on the first try.
3135 		 */
3136 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3137 
3138 		/*
3139 		 * make 10 additional attempts to clear the interrupt
3140 		 */
3141 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3142 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3143 			    "still not clear try=%d", intr_status,
3144 			    ++nvcleared));
3145 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3146 			    clear_bits);
3147 			intr_status = nv_get8(bar5_hdl,
3148 			    nvc->nvc_ck804_int_status);
3149 		}
3150 
3151 		/*
3152 		 * if still not clear, log a message and disable the
3153 		 * port. highly unlikely that this path is taken, but it
3154 		 * gives protection against a wedged interrupt.
3155 		 */
3156 		if (intr_status & clear_bits) {
3157 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3158 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3159 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3160 			nvp->nvp_state |= NV_PORT_FAILED;
3161 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3162 			    B_TRUE);
3163 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3164 			    "interrupt.  disabling port intr_status=%X",
3165 			    intr_status);
3166 		}
3167 
3168 		mutex_exit(&nvp->nvp_mutex);
3169 	}
3170 }
3171 
3172 
3173 /*
3174  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3175  * on the controller, to handle completion and hot plug and remove events.
3176  *
3177  */
3178 static uint_t
3179 mcp5x_intr_port(nv_port_t *nvp)
3180 {
3181 	nv_ctl_t *nvc = nvp->nvp_ctlp;
3182 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3183 	uint8_t clear = 0, intr_cycles = 0;
3184 	int ret = DDI_INTR_UNCLAIMED;
3185 	uint16_t int_status;
3186 	clock_t intr_time;
3187 	int loop_cnt = 0;
3188 
3189 	nvp->intr_start_time = ddi_get_lbolt();
3190 
3191 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
3192 
3193 	do {
3194 		/*
3195 		 * read current interrupt status
3196 		 */
3197 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3198 
3199 		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
3200 
3201 		/*
3202 		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3203 		 * but are masked out from causing an interrupt to be generated
3204 		 * to the processor.  Ignore them here by masking them out.
3205 		 */
3206 		int_status &= ~(MCP5X_INT_IGNORE);
3207 
3208 		/*
3209 		 * exit the loop when no more interrupts to process
3210 		 */
3211 		if (int_status == 0) {
3212 
3213 			break;
3214 		}
3215 
3216 		if (int_status & MCP5X_INT_COMPLETE) {
3217 			NVLOG((NVDBG_INTR, nvc, nvp,
3218 			    "mcp5x_packet_complete_intr"));
3219 			/*
3220 			 * since int_status was set, return DDI_INTR_CLAIMED
3221 			 * from the DDI's perspective even though the packet
3222 			 * completion may not have succeeded.  If it fails,
3223 			 * need to manually clear the interrupt, otherwise
3224 			 * clearing is implicit.
3225 			 */
3226 			ret = DDI_INTR_CLAIMED;
3227 			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3228 			    NV_FAILURE) {
3229 				clear |= MCP5X_INT_COMPLETE;
3230 			} else {
3231 				intr_cycles = 0;
3232 			}
3233 		}
3234 
3235 		if (int_status & MCP5X_INT_DMA_SETUP) {
3236 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));
3237 
3238 			/*
3239 			 * Needs to be cleared before starting the BM, so do it
3240 			 * now.  make sure this is still working.
3241 			 */
3242 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3243 			    MCP5X_INT_DMA_SETUP);
3244 #ifdef NCQ
3245 			ret = mcp5x_dma_setup_intr(nvc, nvp);
3246 #endif
3247 		}
3248 
3249 		if (int_status & MCP5X_INT_REM) {
3250 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device removed"));
3251 			clear |= MCP5X_INT_REM;
3252 			ret = DDI_INTR_CLAIMED;
3253 
3254 			mutex_enter(&nvp->nvp_mutex);
3255 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3256 			mutex_exit(&nvp->nvp_mutex);
3257 
3258 		} else if (int_status & MCP5X_INT_ADD) {
3259 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
3260 			clear |= MCP5X_INT_ADD;
3261 			ret = DDI_INTR_CLAIMED;
3262 
3263 			mutex_enter(&nvp->nvp_mutex);
3264 			nv_report_add_remove(nvp, 0);
3265 			mutex_exit(&nvp->nvp_mutex);
3266 		}
3267 		if (clear) {
3268 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3269 			clear = 0;
3270 		}
3271 		/* Protect against a stuck interrupt */
3272 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3273 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3274 			    "processing.  Disabling port int_status=%X"
3275 			    " clear=%X", int_status, clear);
3276 			mutex_enter(&nvp->nvp_mutex);
3277 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3278 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3279 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3280 			nvp->nvp_state |= NV_PORT_FAILED;
3281 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3282 			    B_TRUE);
3283 			mutex_exit(&nvp->nvp_mutex);
3284 		}
3285 
3286 	} while (loop_cnt++ < nv_max_intr_loops);
3287 
3288 	if (loop_cnt > nvp->intr_loop_cnt) {
3289 		NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp,
3290 		    "Exiting with multiple intr loop count %d", loop_cnt));
3291 		nvp->intr_loop_cnt = loop_cnt;
3292 	}
3293 
3294 	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3295 	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3296 		uint8_t status, bmstatus;
3297 		uint16_t int_status2;
3298 
3299 		if (int_status & MCP5X_INT_COMPLETE) {
3300 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3301 			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3302 			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3303 			    nvp->nvp_mcp5x_int_status);
3304 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3305 			    "mcp55_intr_port: Exiting with altstatus %x, "
3306 			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3307 			    " loop_cnt %d ", status, bmstatus, int_status2,
3308 			    int_status, ret, loop_cnt));
3309 		}
3310 	}
3311 
3312 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
3313 
3314 	/*
3315 	 * To facilitate debugging, keep track of the length of time spent in
3316 	 * the port interrupt routine.
3317 	 */
3318 	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3319 	if (intr_time > nvp->intr_duration)
3320 		nvp->intr_duration = intr_time;
3321 
3322 	return (ret);
3323 }
3324 
3325 
3326 /* ARGSUSED */
3327 static uint_t
3328 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3329 {
3330 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3331 	int ret;
3332 
3333 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3334 		return (DDI_INTR_UNCLAIMED);
3335 
3336 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3337 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
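
	/*
	 * OR-ing the per-port results yields DDI_INTR_CLAIMED if either
	 * port claimed the interrupt, since DDI_INTR_UNCLAIMED is zero.
	 */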
3338 
3339 	return (ret);
3340 }
3341 
3342 
3343 #ifdef NCQ
3344 /*
3345  * with software driven NCQ on mcp5x, an interrupt occurs right
3346  * before the drive is ready to do a DMA transfer.  At this point,
3347  * the PRD table needs to be programmed and the DMA engine enabled
3348  * and ready to go.
3349  *
3350  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3351  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3352  * -- clear bit 0 of master command reg
3353  * -- program PRD
3354  * -- clear the interrupt status bit for the DMA Setup FIS
3355  * -- set bit 0 of the bus master command register
3356  */
3357 static int
3358 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3359 {
3360 	int slot;
3361 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3362 	uint8_t bmicx;
3363 	int port = nvp->nvp_port_num;
3364 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3365 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3366 
3367 	nv_cmn_err(CE_PANIC, nvc, nvp,
3368 	    "this is should not be executed at all until NCQ");
3369 
3370 	mutex_enter(&nvp->nvp_mutex);
3371 
3372 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3373 
3374 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3375 
3376 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3377 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3378 
3379 	/*
3380 	 * halt the DMA engine.  This step is necessary according to
3381 	 * the mcp5x spec, probably since there may have been a "first" packet
3382 	 * that already programmed the DMA engine, but may not turn out to
3383 	 * be the first one processed.
3384 	 */
3385 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3386 
3387 	if (bmicx & BMICX_SSBM) {
3388 		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3389 		    "another packet.  Cancelling and reprogramming"));
3390 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3391 	}
3392 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3393 
3394 	nv_start_dma_engine(nvp, slot);
3395 
3396 	mutex_exit(&nvp->nvp_mutex);
3397 
3398 	return (DDI_INTR_CLAIMED);
3399 }
3400 #endif /* NCQ */
3401 
3402 
3403 /*
3404  * packet completion interrupt.  If the packet is complete, invoke
3405  * the packet completion callback.
3406  */
3407 static int
3408 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3409 {
3410 	uint8_t status, bmstatus;
3411 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3412 	int sactive;
3413 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3414 	sata_pkt_t *spkt;
3415 	nv_slot_t *nv_slotp;
3416 
3417 	mutex_enter(&nvp->nvp_mutex);
3418 
3419 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3420 
3421 	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3422 		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3423 		mutex_exit(&nvp->nvp_mutex);
3424 
3425 		return (NV_FAILURE);
3426 	}
3427 
3428 	/*
3429 	 * Commands may have been processed by abort or timeout before
3430 	 * interrupt processing acquired the mutex. So we may be processing
3431 	 * an interrupt for packets that were already removed.
	 * For functioning NCQ processing, all slots may be checked, but
3433 	 * with NCQ disabled (current code), relying on *_run flags is OK.
3434 	 */
3435 	if (nvp->nvp_non_ncq_run) {
3436 		/*
3437 		 * If the just completed item is a non-ncq command, the busy
3438 		 * bit should not be set
3439 		 */
3440 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3441 		if (status & SATA_STATUS_BSY) {
3442 			nv_cmn_err(CE_WARN, nvc, nvp,
3443 			    "unexpected SATA_STATUS_BSY set");
3444 			mutex_exit(&nvp->nvp_mutex);
3445 			/*
3446 			 * calling function will clear interrupt.  then
3447 			 * the real interrupt will either arrive or the
3448 			 * packet timeout handling will take over and
3449 			 * reset.
3450 			 */
3451 			return (NV_FAILURE);
3452 		}
3453 		ASSERT(nvp->nvp_ncq_run == 0);
3454 	} else {
3455 		ASSERT(nvp->nvp_non_ncq_run == 0);
3456 		/*
3457 		 * Pre-NCQ code!
3458 		 * Nothing to do. The packet for the command that just
3459 		 * completed is already gone. Just clear the interrupt.
3460 		 */
3461 		(void) nv_bm_status_clear(nvp);
3462 		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3463 		mutex_exit(&nvp->nvp_mutex);
3464 		return (NV_SUCCESS);
3465 
3466 		/*
		 * For NCQ, check for BSY here and wait if still busy before
		 * continuing.  Rather than waiting for it to be cleared
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediately, but might have to spin here
		 * for a bit.  Needs more work and experimentation.
3473 		 */
3474 	}
3475 
3476 	/*
3477 	 * active_pkt_bit will represent the bitmap of the single completed
3478 	 * packet.  Because of the nature of sw assisted NCQ, only one
3479 	 * command will complete per interrupt.
3480 	 */
3481 
3482 	if (ncq_command == B_FALSE) {
3483 		active_pkt = 0;
3484 	} else {
3485 		/*
3486 		 * NCQ: determine which command just completed, by examining
3487 		 * which bit cleared in the register since last written.
3488 		 */
3489 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3490 
3491 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3492 
3493 		ASSERT(active_pkt_bit);
3494 
3495 
3496 		/*
3497 		 * this failure path needs more work to handle the
3498 		 * error condition and recovery.
3499 		 */
3500 		if (active_pkt_bit == 0) {
3501 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3502 
3503 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3504 			    "nvp->nvp_sactive %X", sactive,
3505 			    nvp->nvp_sactive_cache);
3506 
3507 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3508 
3509 			mutex_exit(&nvp->nvp_mutex);
3510 
3511 			return (NV_FAILURE);
3512 		}
3513 
3514 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3515 		    active_pkt++, active_pkt_bit >>= 1) {
3516 		}
3517 
3518 		/*
3519 		 * make sure only one bit is ever turned on
3520 		 */
3521 		ASSERT(active_pkt_bit == 1);
3522 
3523 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3524 	}
3525 
3526 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3527 
3528 	spkt = nv_slotp->nvslot_spkt;
3529 
3530 	ASSERT(spkt != NULL);
3531 
3532 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3533 
3534 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3535 
3536 	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3537 
3538 		nv_complete_io(nvp, spkt, active_pkt);
3539 	}
3540 
3541 	mutex_exit(&nvp->nvp_mutex);
3542 
3543 	return (NV_SUCCESS);
3544 }
3545 
3546 
3547 static void
3548 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3549 {
3550 
3551 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3552 
3553 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3554 		nvp->nvp_ncq_run--;
3555 	} else {
3556 		nvp->nvp_non_ncq_run--;
3557 	}
3558 
3559 	/*
3560 	 * mark the packet slot idle so it can be reused.  Do this before
3561 	 * calling satapkt_comp so the slot can be reused.
3562 	 */
3563 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3564 
3565 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3566 		/*
3567 		 * If this is not timed polled mode cmd, which has an
3568 		 * active thread monitoring for completion, then need
3569 		 * to signal the sleeping thread that the cmd is complete.
3570 		 */
3571 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3572 			cv_signal(&nvp->nvp_poll_cv);
3573 		}
3574 
3575 		return;
3576 	}
3577 
3578 	if (spkt->satapkt_comp != NULL) {
3579 		mutex_exit(&nvp->nvp_mutex);
3580 		(*spkt->satapkt_comp)(spkt);
3581 		mutex_enter(&nvp->nvp_mutex);
3582 	}
3583 }
3584 
3585 
3586 /*
3587  * check whether packet is ncq command or not.  for ncq command,
3588  * start it if there is still room on queue.  for non-ncq command only
3589  * start if no other command is running.
3590  */
3591 static int
3592 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3593 {
3594 	uint8_t cmd, ncq;
3595 
3596 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3597 
3598 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3599 
3600 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3601 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3602 
3603 	if (ncq == B_FALSE) {
3604 
3605 		if ((nvp->nvp_non_ncq_run == 1) ||
3606 		    (nvp->nvp_ncq_run > 0)) {
3607 			/*
3608 			 * next command is non-ncq which can't run
3609 			 * concurrently.  exit and return queue full.
3610 			 */
3611 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3612 
3613 			return (SATA_TRAN_QUEUE_FULL);
3614 		}
3615 
3616 		return (nv_start_common(nvp, spkt));
3617 	}
3618 
3619 	/*
3620 	 * ncq == B_TRUE
3621 	 */
3622 	if (nvp->nvp_non_ncq_run == 1) {
3623 		/*
3624 		 * cannot start any NCQ commands when there
3625 		 * is a non-NCQ command running.
3626 		 */
3627 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3628 
3629 		return (SATA_TRAN_QUEUE_FULL);
3630 	}
3631 
3632 #ifdef NCQ
3633 	/*
3634 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3635 	 * is being pulled out until NCQ support is later addressed
3636 	 *
3637 	 * nvp_queue_depth is initialized by the first NCQ command
3638 	 * received.
3639 	 */
3640 	if (nvp->nvp_queue_depth == 1) {
3641 		nvp->nvp_queue_depth =
3642 		    spkt->satapkt_device.satadev_qdepth;
3643 
3644 		ASSERT(nvp->nvp_queue_depth > 1);
3645 
3646 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3647 		    "nv_process_queue: nvp_queue_depth set to %d",
3648 		    nvp->nvp_queue_depth));
3649 	}
3650 #endif
3651 
3652 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3653 		/*
3654 		 * max number of NCQ commands already active
3655 		 */
3656 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3657 
3658 		return (SATA_TRAN_QUEUE_FULL);
3659 	}
3660 
3661 	return (nv_start_common(nvp, spkt));
3662 }
3663 
3664 
3665 /*
3666  * configure INTx and legacy interrupts
3667  */
3668 static int
3669 nv_add_legacy_intrs(nv_ctl_t *nvc)
3670 {
3671 	dev_info_t	*devinfo = nvc->nvc_dip;
3672 	int		actual, count = 0;
3673 	int		x, y, rc, inum = 0;
3674 
3675 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3676 
3677 	/*
3678 	 * get number of interrupts
3679 	 */
3680 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3681 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3682 		NVLOG((NVDBG_INTR, nvc, NULL,
3683 		    "ddi_intr_get_nintrs() failed, "
3684 		    "rc %d count %d", rc, count));
3685 
3686 		return (DDI_FAILURE);
3687 	}
3688 
3689 	/*
3690 	 * allocate an array of interrupt handles
3691 	 */
3692 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3693 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3694 
3695 	/*
3696 	 * call ddi_intr_alloc()
3697 	 */
3698 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3699 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
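	/*
	 * With DDI_INTR_ALLOC_STRICT the allocation fails unless the full
	 * count can be allocated, so the actual < count check below is
	 * only a precaution.
	 */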
3700 
3701 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3702 		nv_cmn_err(CE_WARN, nvc, NULL,
3703 		    "ddi_intr_alloc() failed, rc %d", rc);
3704 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3705 
3706 		return (DDI_FAILURE);
3707 	}
3708 
3709 	if (actual < count) {
3710 		nv_cmn_err(CE_WARN, nvc, NULL,
3711 		    "ddi_intr_alloc: requested: %d, received: %d",
3712 		    count, actual);
3713 
3714 		goto failure;
3715 	}
3716 
3717 	nvc->nvc_intr_cnt = actual;
3718 
3719 	/*
3720 	 * get intr priority
3721 	 */
3722 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3723 	    DDI_SUCCESS) {
3724 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3725 
3726 		goto failure;
3727 	}
3728 
3729 	/*
3730 	 * Test for high level mutex
3731 	 */
3732 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3733 		nv_cmn_err(CE_WARN, nvc, NULL,
3734 		    "nv_add_legacy_intrs: high level intr not supported");
3735 
3736 		goto failure;
3737 	}
3738 
3739 	for (x = 0; x < actual; x++) {
3740 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3741 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3742 			nv_cmn_err(CE_WARN, nvc, NULL,
3743 			    "ddi_intr_add_handler() failed");
3744 
3745 			goto failure;
3746 		}
3747 	}
3748 
3749 	/*
3750 	 * call ddi_intr_enable() for legacy interrupts
3751 	 */
3752 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3753 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3754 	}
3755 
3756 	return (DDI_SUCCESS);
3757 
3758 	failure:
3759 	/*
3760 	 * free allocated intr and nvc_htable
3761 	 */
3762 	for (y = 0; y < actual; y++) {
3763 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3764 	}
3765 
3766 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3767 
3768 	return (DDI_FAILURE);
3769 }
3770 
3771 #ifdef	NV_MSI_SUPPORTED
3772 /*
3773  * configure MSI interrupts
3774  */
3775 static int
3776 nv_add_msi_intrs(nv_ctl_t *nvc)
3777 {
3778 	dev_info_t	*devinfo = nvc->nvc_dip;
3779 	int		count, avail, actual;
3780 	int		x, y, rc, inum = 0;
3781 
3782 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3783 
3784 	/*
3785 	 * get number of interrupts
3786 	 */
3787 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3788 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3789 		nv_cmn_err(CE_WARN, nvc, NULL,
3790 		    "ddi_intr_get_nintrs() failed, "
3791 		    "rc %d count %d", rc, count);
3792 
3793 		return (DDI_FAILURE);
3794 	}
3795 
3796 	/*
3797 	 * get number of available interrupts
3798 	 */
3799 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3800 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3801 		nv_cmn_err(CE_WARN, nvc, NULL,
3802 		    "ddi_intr_get_navail() failed, "
3803 		    "rc %d avail %d", rc, avail);
3804 
3805 		return (DDI_FAILURE);
3806 	}
3807 
3808 	if (avail < count) {
3809 		nv_cmn_err(CE_WARN, nvc, NULL,
3810 		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3811 		    avail, count);
3812 	}
3813 
3814 	/*
3815 	 * allocate an array of interrupt handles
3816 	 */
3817 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3818 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3819 
3820 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3821 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
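	/*
	 * With DDI_INTR_ALLOC_NORMAL a partial allocation can succeed;
	 * nvc_intr_cnt is set to the actual count returned below.
	 */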
3822 
3823 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3824 		nv_cmn_err(CE_WARN, nvc, NULL,
3825 		    "ddi_intr_alloc() failed, rc %d", rc);
3826 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3827 
3828 		return (DDI_FAILURE);
3829 	}
3830 
3831 	/*
3832 	 * Use interrupt count returned or abort?
3833 	 */
3834 	if (actual < count) {
3835 		NVLOG((NVDBG_INIT, nvc, NULL,
3836 		    "Requested: %d, Received: %d", count, actual));
3837 	}
3838 
3839 	nvc->nvc_intr_cnt = actual;
3840 
3841 	/*
3842 	 * get priority for first msi, assume remaining are all the same
3843 	 */
3844 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3845 	    DDI_SUCCESS) {
3846 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3847 
3848 		goto failure;
3849 	}
3850 
3851 	/*
3852 	 * test for high level mutex
3853 	 */
3854 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3855 		nv_cmn_err(CE_WARN, nvc, NULL,
3856 		    "nv_add_msi_intrs: high level intr not supported");
3857 
3858 		goto failure;
3859 	}
3860 
3861 	/*
3862 	 * Call ddi_intr_add_handler()
3863 	 */
3864 	for (x = 0; x < actual; x++) {
3865 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3866 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3867 			nv_cmn_err(CE_WARN, nvc, NULL,
3868 			    "ddi_intr_add_handler() failed");
3869 
3870 			goto failure;
3871 		}
3872 	}
3873 
3874 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3875 
3876 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3877 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3878 		    nvc->nvc_intr_cnt);
3879 	} else {
3880 		/*
3881 		 * Call ddi_intr_enable() for MSI non block enable
3882 		 */
3883 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3884 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3885 		}
3886 	}
3887 
3888 	return (DDI_SUCCESS);
3889 
3890 	failure:
3891 	/*
3892 	 * free allocated intr and nvc_htable
3893 	 */
3894 	for (y = 0; y < actual; y++) {
3895 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3896 	}
3897 
3898 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3899 
3900 	return (DDI_FAILURE);
3901 }
3902 #endif
3903 
3904 
3905 static void
3906 nv_rem_intrs(nv_ctl_t *nvc)
3907 {
3908 	int x, i;
3909 	nv_port_t *nvp;
3910 
3911 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3912 
3913 	/*
3914 	 * prevent controller from generating interrupts by
3915 	 * masking them out.  This is an extra precaution.
3916 	 */
3917 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3918 		nvp = (&nvc->nvc_port[i]);
3919 		mutex_enter(&nvp->nvp_mutex);
3920 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3921 		mutex_exit(&nvp->nvp_mutex);
3922 	}
3923 
3924 	/*
3925 	 * disable all interrupts
3926 	 */
3927 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3928 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3929 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3930 		    nvc->nvc_intr_cnt);
3931 	} else {
3932 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3933 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3934 		}
3935 	}
3936 
3937 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3938 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3939 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3940 	}
3941 
3942 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3943 }
3944 
3945 
3946 /*
3947  * variable argument wrapper for cmn_err.  prefixes the instance and port
3948  * number if possible
3949  */
3950 static void
3951 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3952 {
3953 	char port[NV_STRING_10];
3954 	char inst[NV_STRING_10];
3955 
3956 	mutex_enter(&nv_log_mutex);
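	/*
	 * nv_log_buf is a single shared buffer, so the log mutex is held
	 * across both formatting and output.
	 */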
3957 
3958 	if (nvc) {
3959 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3960 		    ddi_get_instance(nvc->nvc_dip));
3961 	} else {
3962 		inst[0] = '\0';
3963 	}
3964 
3965 	if (nvp) {
3966 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3967 	} else {
3968 		port[0] = '\0';
3969 	}
3970 
3971 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3972 	    (inst[0]|port[0] ? ": " :""));
3973 
3974 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3975 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3976 
3977 	/*
3978 	 * normally set to log to console but in some debug situations it
3979 	 * may be useful to log only to a file.
3980 	 */
3981 	if (nv_log_to_console) {
3982 		if (nv_prom_print) {
3983 			prom_printf("%s\n", nv_log_buf);
3984 		} else {
3985 			cmn_err(ce, "%s", nv_log_buf);
3986 		}
3987 
3988 
3989 	} else {
3990 		cmn_err(ce, "!%s", nv_log_buf);
3991 	}
3992 
3993 	mutex_exit(&nv_log_mutex);
3994 }
3995 
3996 
3997 /*
3998  * wrapper for cmn_err
3999  */
4000 static void
4001 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4002 {
4003 	va_list ap;
4004 
4005 	va_start(ap, fmt);
4006 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
4007 	va_end(ap);
4008 }
4009 
4010 
4011 #if defined(DEBUG)
4012 /*
4013  * prefixes the instance and port number if possible to the debug message
4014  */
4015 static void
4016 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4017 {
4018 	va_list ap;
4019 
4020 	if ((nv_debug_flags & flag) == 0) {
4021 		return;
4022 	}
4023 
4024 	va_start(ap, fmt);
4025 	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
4026 	va_end(ap);
4027 
4028 	/*
4029 	 * useful for some debugging situations
4030 	 */
4031 	if (nv_log_delay) {
4032 		drv_usecwait(nv_log_delay);
4033 	}
4034 
4035 }
4036 #endif /* DEBUG */
4037 
4038 
4039 /*
4040  * program registers which are common to all commands
4041  */
4042 static void
4043 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4044 {
4045 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4046 	sata_pkt_t *spkt;
4047 	sata_cmd_t *satacmd;
4048 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4049 	uint8_t cmd, ncq = B_FALSE;
4050 
4051 	spkt = nv_slotp->nvslot_spkt;
4052 	satacmd = &spkt->satapkt_cmd;
4053 	cmd = satacmd->satacmd_cmd_reg;
4054 
4055 	ASSERT(nvp->nvp_slot);
4056 
4057 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4058 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4059 		ncq = B_TRUE;
4060 	}
4061 
4062 	/*
4063 	 * select the drive
4064 	 */
4065 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4066 
4067 	/*
4068 	 * make certain the drive selected
4069 	 */
4070 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4071 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4072 
4073 		return;
4074 	}
4075 
4076 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4077 
4078 	case ATA_ADDR_LBA:
4079 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
4080 
4081 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4082 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4083 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4084 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4085 
4086 		break;
4087 
4088 	case ATA_ADDR_LBA28:
4089 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4090 		    "ATA_ADDR_LBA28 mode"));
4091 		/*
4092 		 * NCQ only uses 48-bit addressing
4093 		 */
4094 		ASSERT(ncq != B_TRUE);
4095 
4096 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4097 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4098 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4099 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4100 
4101 		break;
4102 
4103 	case ATA_ADDR_LBA48:
4104 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4105 		    "ATA_ADDR_LBA48 mode"));
4106 
4107 		/*
4108 		 * for NCQ, tag goes into count register and real sector count
4109 		 * into features register.  The sata module does the translation
4110 		 * in the satacmd.
4111 		 */
4112 		if (ncq == B_TRUE) {
4113 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4114 			nv_put8(cmdhdl, nvp->nvp_feature,
4115 			    satacmd->satacmd_features_reg_ext);
4116 			nv_put8(cmdhdl, nvp->nvp_feature,
4117 			    satacmd->satacmd_features_reg);
4118 		} else {
4119 			nv_put8(cmdhdl, nvp->nvp_count,
4120 			    satacmd->satacmd_sec_count_msb);
4121 			nv_put8(cmdhdl, nvp->nvp_count,
4122 			    satacmd->satacmd_sec_count_lsb);
4123 		}
4124 
4125 		/*
4126 		 * send the high-order half first
4127 		 */
4128 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4129 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4130 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4131 		/*
4132 		 * Send the low-order half
4133 		 */
4134 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4135 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4136 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4137 
4138 		break;
4139 
4140 	case 0:
4141 		/*
4142 		 * non-media access commands such as identify and features
4143 		 * take this path.
4144 		 */
4145 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4146 		nv_put8(cmdhdl, nvp->nvp_feature,
4147 		    satacmd->satacmd_features_reg);
4148 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4149 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4150 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4151 
4152 		break;
4153 
4154 	default:
4155 		break;
4156 	}
4157 
4158 	ASSERT(nvp->nvp_slot);
4159 }
4160 
4161 
4162 /*
4163  * start a command that involves no media access
4164  */
4165 static int
4166 nv_start_nodata(nv_port_t *nvp, int slot)
4167 {
4168 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4169 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4170 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4171 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4172 
4173 	nv_program_taskfile_regs(nvp, slot);
4174 
4175 	/*
4176 	 * This next one sets the controller in motion
4177 	 */
4178 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4179 
4180 	return (SATA_TRAN_ACCEPTED);
4181 }
4182 
4183 
4184 static int
4185 nv_bm_status_clear(nv_port_t *nvp)
4186 {
4187 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4188 	uchar_t	status, ret;
4189 
4190 	/*
4191 	 * Get the current BM status
4192 	 */
4193 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4194 
4195 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4196 
4197 	/*
4198 	 * Clear the latches (and preserve the other bits)
4199 	 */
4200 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4201 
4202 	return (ret);
4203 }
4204 
4205 
4206 /*
4207  * program the bus master DMA engine with the PRD address for
4208  * the active slot command, and start the DMA engine.
4209  */
4210 static void
4211 nv_start_dma_engine(nv_port_t *nvp, int slot)
4212 {
4213 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4214 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4215 	uchar_t direction;
4216 
4217 	ASSERT(nv_slotp->nvslot_spkt != NULL);
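	/*
	 * For a read from the device the bus master writes to host memory;
	 * for a write to the device it reads from host memory.
	 */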
4218 
4219 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4220 	    == SATA_DIR_READ) {
4221 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4222 	} else {
4223 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4224 	}
4225 
4226 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4227 	    "nv_start_dma_engine entered"));
4228 
4229 #if NOT_USED
4230 	/*
	 * NOT NEEDED.  Left here for historical reasons.
4232 	 * Reset the controller's interrupt and error status bits.
4233 	 */
4234 	(void) nv_bm_status_clear(nvp);
4235 #endif
4236 	/*
4237 	 * program the PRD table physical start address
4238 	 */
4239 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4240 
4241 	/*
4242 	 * set the direction control and start the DMA controller
4243 	 */
4244 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4245 }
4246 
4247 /*
4248  * start dma command, either in or out
4249  */
4250 static int
4251 nv_start_dma(nv_port_t *nvp, int slot)
4252 {
4253 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4254 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4255 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4256 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4257 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4258 #ifdef NCQ
4259 	uint8_t ncq = B_FALSE;
4260 #endif
4261 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4262 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4263 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4264 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4265 
4266 	ASSERT(sg_count != 0);
4267 
4268 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4269 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4270 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4271 		    sata_cmdp->satacmd_num_dma_cookies);
4272 
4273 		return (NV_FAILURE);
4274 	}
4275 
4276 	nv_program_taskfile_regs(nvp, slot);
4277 
4278 	/*
4279 	 * start the drive in motion
4280 	 */
4281 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4282 
4283 	/*
4284 	 * the drive starts processing the transaction when the cmd register
4285 	 * is written.  This is done here before programming the DMA engine to
4286 	 * parallelize and save some time.  In the event that the drive is ready
4287 	 * before DMA, it will wait.
4288 	 */
4289 #ifdef NCQ
4290 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4291 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4292 		ncq = B_TRUE;
4293 	}
4294 #endif
4295 
4296 	/*
4297 	 * copy the PRD list to PRD table in DMA accessible memory
4298 	 * so that the controller can access it.
4299 	 */
4300 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4301 		uint32_t size;
4302 
4303 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4304 
4305 		/* Set the number of bytes to transfer, 0 implies 64KB */
4306 		size = srcp->dmac_size;
4307 		if (size == 0x10000)
4308 			size = 0;
4309 
4310 		/*
4311 		 * If this is a 40-bit address, copy bits 32-40 of the
4312 		 * physical address to bits 16-24 of the PRD count.
4313 		 */
4314 		if (srcp->dmac_laddress > UINT32_MAX) {
4315 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4316 		}
4317 
4318 		/*
4319 		 * set the end of table flag for the last entry
4320 		 */
4321 		if (idx == (sg_count - 1)) {
4322 			size |= PRDE_EOT;
4323 		}
4324 
4325 		nv_put32(sghdl, dstp++, size);
4326 	}
4327 
4328 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4329 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4330 
4331 	nv_start_dma_engine(nvp, slot);
4332 
4333 #ifdef NCQ
4334 	/*
4335 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4336 	 * command running.  Preliminary NCQ efforts indicated this needs
4337 	 * more debugging.
4338 	 *
4339 	 * if (nvp->nvp_ncq_run <= 1)
4340 	 */
4341 
4342 	if (ncq == B_FALSE) {
4343 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4344 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4345 		    " cmd = %X", non_ncq_commands++, cmd));
4346 		nv_start_dma_engine(nvp, slot);
4347 	} else {
4348 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4349 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4350 	}
4351 #endif /* NCQ */
4352 
4353 	return (SATA_TRAN_ACCEPTED);
4354 }
4355 
4356 
4357 /*
4358  * start a PIO data-in ATA command
4359  */
4360 static int
4361 nv_start_pio_in(nv_port_t *nvp, int slot)
4362 {
4363 
4364 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4365 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4366 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4367 
4368 	nv_program_taskfile_regs(nvp, slot);
4369 
4370 	/*
4371 	 * This next one sets the drive in motion
4372 	 */
4373 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4374 
4375 	return (SATA_TRAN_ACCEPTED);
4376 }
4377 
4378 
4379 /*
4380  * start a PIO data-out ATA command
4381  */
4382 static int
4383 nv_start_pio_out(nv_port_t *nvp, int slot)
4384 {
4385 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4386 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4387 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4388 
4389 	nv_program_taskfile_regs(nvp, slot);
4390 
4391 	/*
4392 	 * this next one sets the drive in motion
4393 	 */
4394 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4395 
4396 	/*
4397 	 * wait for the busy bit to settle
4398 	 */
4399 	NV_DELAY_NSEC(400);
4400 
4401 	/*
4402 	 * wait for the drive to assert DRQ to send the first chunk
4403 	 * of data. Have to busy wait because there's no interrupt for
4404 	 * the first chunk. This is bad... uses a lot of cycles if the
4405 	 * drive responds too slowly or if the wait loop granularity
4406 	 * is too large. It's even worse if the drive is defective and
4407 	 * the loop times out.
4408 	 */
4409 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4410 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4411 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4412 	    4000000, 0) == B_FALSE) {
4413 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4414 
4415 		goto error;
4416 	}
4417 
4418 	/*
4419 	 * send the first block.
4420 	 */
4421 	nv_intr_pio_out(nvp, nv_slotp);
4422 
4423 	/*
4424 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4425 	 * is OK so far, so return.  Otherwise, fall into error handling
4426 	 * below.
4427 	 */
4428 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4429 
4430 		return (SATA_TRAN_ACCEPTED);
4431 	}
4432 
4433 	error:
4434 	/*
4435 	 * there was an error so reset the device and complete the packet.
4436 	 */
4437 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4438 	nv_complete_io(nvp, spkt, 0);
4439 	nvp->nvp_state |= NV_PORT_RESET;
4440 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4441 	nv_reset(nvp);
4442 
4443 	return (SATA_TRAN_PORT_ERROR);
4444 }
4445 
4446 
4447 /*
4448  * start a ATAPI Packet command (PIO data in or out)
4449  */
4450 static int
4451 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4452 {
4453 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4454 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4455 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4456 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4457 
4458 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4459 	    "nv_start_pkt_pio: start"));
4460 
4461 	/*
4462 	 * Write the PACKET command to the command register.  Normally
4463 	 * this would be done through nv_program_taskfile_regs().  It
4464 	 * is done here because some values need to be overridden.
4465 	 */
4466 
4467 	/* select the drive */
4468 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4469 
	/* make certain the drive is selected */
4471 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4472 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4473 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4474 		    "nv_start_pkt_pio: drive select failed"));
4475 		return (SATA_TRAN_PORT_ERROR);
4476 	}
4477 
4478 	/*
4479 	 * The command is always sent via PIO, despite whatever the SATA
4480 	 * framework sets in the command.  Overwrite the DMA bit to do this.
4481 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4482 	 */
4483 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4484 
4485 	/* set appropriately by the sata framework */
4486 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4487 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4488 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4489 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4490 
4491 	/* initiate the command by writing the command register last */
4492 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4493 
4494 	/* Give the host controller time to do its thing */
4495 	NV_DELAY_NSEC(400);
4496 
4497 	/*
4498 	 * Wait for the device to indicate that it is ready for the command
4499 	 * ATAPI protocol state - HP0: Check_Status_A
4500 	 */
4501 
4502 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4503 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4504 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4505 	    4000000, 0) == B_FALSE) {
4506 		/*
4507 		 * Either an error or device fault occurred or the wait
4508 		 * timed out.  According to the ATAPI protocol, command
4509 		 * completion is also possible.  Other implementations of
4510 		 * this protocol don't handle this last case, so neither
4511 		 * does this code.
4512 		 */
4513 
4514 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4515 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4516 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4517 
4518 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4519 			    "nv_start_pkt_pio: device error (HP0)"));
4520 		} else {
4521 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4522 
4523 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4524 			    "nv_start_pkt_pio: timeout (HP0)"));
4525 		}
4526 
4527 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4528 		nv_complete_io(nvp, spkt, 0);
4529 		nvp->nvp_state |= NV_PORT_RESET;
4530 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4531 		nv_reset(nvp);
4532 
4533 		return (SATA_TRAN_PORT_ERROR);
4534 	}
4535 
4536 	/*
4537 	 * Put the ATAPI command in the data register
4538 	 * ATAPI protocol state - HP1: Send_Packet
4539 	 */
4540 
4541 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4542 	    (ushort_t *)nvp->nvp_data,
4543 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4544 
4545 	/*
4546 	 * See you in nv_intr_pkt_pio.
4547 	 * ATAPI protocol state - HP3: INTRQ_wait
4548 	 */
4549 
4550 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4551 	    "nv_start_pkt_pio: exiting into HP3"));
4552 
4553 	return (SATA_TRAN_ACCEPTED);
4554 }
4555 
4556 
4557 /*
4558  * Interrupt processing for a non-data ATA command.
4559  */
4560 static void
4561 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4562 {
4563 	uchar_t status;
4564 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4565 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4566 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4567 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4568 
4569 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4570 
4571 	status = nv_get8(cmdhdl, nvp->nvp_status);
4572 
4573 	/*
4574 	 * check for errors
4575 	 */
4576 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4577 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4578 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4579 		    nvp->nvp_altstatus);
4580 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4581 	} else {
4582 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4583 	}
4584 
4585 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4586 }
4587 
4588 
4589 /*
4590  * ATA command, PIO data in
4591  */
4592 static void
4593 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4594 {
4595 	uchar_t	status;
4596 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4597 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4598 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4599 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4600 	int count;
4601 
4602 	status = nv_get8(cmdhdl, nvp->nvp_status);
4603 
4604 	if (status & SATA_STATUS_BSY) {
4605 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4606 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4607 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4608 		    nvp->nvp_altstatus);
4609 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4610 		nvp->nvp_state |= NV_PORT_RESET;
4611 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4612 		nv_reset(nvp);
4613 
4614 		return;
4615 	}
4616 
4617 	/*
4618 	 * check for errors
4619 	 */
4620 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4621 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4622 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4623 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4624 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4625 
4626 		return;
4627 	}
4628 
4629 	/*
4630 	 * read the next chunk of data (if any)
4631 	 */
4632 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4633 
4634 	/*
4635 	 * read count bytes
4636 	 */
4637 	ASSERT(count != 0);
4638 
4639 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4640 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4641 
4642 	nv_slotp->nvslot_v_addr += count;
4643 	nv_slotp->nvslot_byte_count -= count;
4644 
4645 
4646 	if (nv_slotp->nvslot_byte_count != 0) {
4647 		/*
4648 		 * more to transfer.  Wait for next interrupt.
4649 		 */
4650 		return;
4651 	}
4652 
4653 	/*
4654 	 * transfer is complete. wait for the busy bit to settle.
4655 	 */
4656 	NV_DELAY_NSEC(400);
4657 
4658 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4659 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4660 }
4661 
4662 
4663 /*
4664  * ATA command PIO data out
4665  */
4666 static void
4667 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4668 {
4669 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4670 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4671 	uchar_t status;
4672 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4673 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4674 	int count;
4675 
4676 	/*
4677 	 * clear the IRQ
4678 	 */
4679 	status = nv_get8(cmdhdl, nvp->nvp_status);
4680 
4681 	if (status & SATA_STATUS_BSY) {
4682 		/*
4683 		 * this should not happen
4684 		 */
4685 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4686 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4687 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4688 		    nvp->nvp_altstatus);
4689 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4690 
4691 		return;
4692 	}
4693 
4694 	/*
4695 	 * check for errors
4696 	 */
4697 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4698 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4699 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4700 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4701 
4702 		return;
4703 	}
4704 
4705 	/*
4706 	 * this is the condition which signals the drive is
4707 	 * no longer ready to transfer.  Likely that the transfer
4708 	 * completed successfully, but check that byte_count is
4709 	 * zero.
4710 	 */
4711 	if ((status & SATA_STATUS_DRQ) == 0) {
4712 
4713 		if (nv_slotp->nvslot_byte_count == 0) {
4714 			/*
4715 			 * complete; successful transfer
4716 			 */
4717 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4718 		} else {
4719 			/*
4720 			 * error condition, incomplete transfer
4721 			 */
4722 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4723 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4724 		}
4725 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4726 
4727 		return;
4728 	}
4729 
4730 	/*
4731 	 * write the next chunk of data
4732 	 */
4733 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4734 
4735 	/*
4736 	 * read or write count bytes
4737 	 */
4738 
4739 	ASSERT(count != 0);
4740 
4741 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4742 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4743 
4744 	nv_slotp->nvslot_v_addr += count;
4745 	nv_slotp->nvslot_byte_count -= count;
4746 }
4747 
4748 
4749 /*
4750  * ATAPI PACKET command, PIO in/out interrupt
4751  *
4752  * Under normal circumstances, one of four different interrupt scenarios
4753  * will result in this function being called:
4754  *
4755  * 1. Packet command data transfer
4756  * 2. Packet command completion
4757  * 3. Request sense data transfer
4758  * 4. Request sense command completion
4759  */
4760 static void
4761 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4762 {
4763 	uchar_t	status;
4764 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4765 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4766 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4767 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4768 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4769 	uint16_t ctlr_count;
4770 	int count;
4771 
4772 	/* ATAPI protocol state - HP2: Check_Status_B */
4773 
4774 	status = nv_get8(cmdhdl, nvp->nvp_status);
4775 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4776 	    "nv_intr_pkt_pio: status 0x%x", status));
4777 
4778 	if (status & SATA_STATUS_BSY) {
4779 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4780 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4781 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4782 		} else {
4783 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4784 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4785 			nvp->nvp_state |= NV_PORT_RESET;
4786 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
4787 			    NV_PORT_RESET_RETRY);
4788 			nv_reset(nvp);
4789 		}
4790 
4791 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4792 		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4793 
4794 		return;
4795 	}
4796 
4797 	if ((status & SATA_STATUS_DF) != 0) {
4798 		/*
4799 		 * On device fault, just clean up and bail.  Request sense
4800 		 * will just default to its NO SENSE initialized value.
4801 		 */
4802 
4803 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4804 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4805 		}
4806 
4807 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4808 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4809 
4810 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4811 		    nvp->nvp_altstatus);
4812 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4813 		    nvp->nvp_error);
4814 
4815 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4816 		    "nv_intr_pkt_pio: device fault"));
4817 
4818 		return;
4819 	}
4820 
4821 	if ((status & SATA_STATUS_ERR) != 0) {
4822 		/*
4823 		 * On command error, figure out whether we are processing a
4824 		 * request sense.  If so, clean up and bail.  Otherwise,
4825 		 * do a REQUEST SENSE.
4826 		 */
4827 
4828 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4829 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4830 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4831 			    NV_FAILURE) {
4832 				nv_copy_registers(nvp, &spkt->satapkt_device,
4833 				    spkt);
4834 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4835 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4836 			}
4837 
4838 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4839 			    nvp->nvp_altstatus);
4840 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4841 			    nvp->nvp_error);
4842 		} else {
4843 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4844 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4845 
4846 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4847 		}
4848 
4849 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4850 		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4851 
4852 		return;
4853 	}
4854 
4855 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4856 		/*
4857 		 * REQUEST SENSE command processing
4858 		 */
4859 
4860 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4861 			/* ATAPI state - HP4: Transfer_Data */
4862 
4863 			/* read the byte count from the controller */
4864 			ctlr_count =
4865 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4866 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4867 
4868 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4869 			    "nv_intr_pkt_pio: ctlr byte count - %d",
4870 			    ctlr_count));
4871 
4872 			if (ctlr_count == 0) {
4873 				/* no data to transfer - some devices do this */
4874 
4875 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4876 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4877 
4878 				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4879 				    "nv_intr_pkt_pio: done (no data)"));
4880 
4881 				return;
4882 			}
4883 
4884 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4885 
4886 			/* transfer the data */
4887 			ddi_rep_get16(cmdhdl,
4888 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4889 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4890 			    DDI_DEV_NO_AUTOINCR);
4891 
			/*
			 * Consume residual bytes: the device expects every
			 * byte it offered to be transferred, so drain
			 * anything beyond the sense buffer.
			 */
4893 			ctlr_count -= count;
4894 
4895 			if (ctlr_count > 0) {
4896 				for (; ctlr_count > 0; ctlr_count -= 2)
4897 					(void) ddi_get16(cmdhdl,
4898 					    (ushort_t *)nvp->nvp_data);
4899 			}
4900 
4901 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4902 			    "nv_intr_pkt_pio: transition to HP2"));
4903 		} else {
4904 			/* still in ATAPI state - HP2 */
4905 
4906 			/*
4907 			 * In order to avoid clobbering the rqsense data
4908 			 * set by the SATA framework, the sense data read
4909 			 * from the device is put in a separate buffer and
4910 			 * copied into the packet after the request sense
4911 			 * command successfully completes.
4912 			 */
4913 			bcopy(nv_slotp->nvslot_rqsense_buff,
4914 			    spkt->satapkt_cmd.satacmd_rqsense,
4915 			    SATA_ATAPI_RQSENSE_LEN);
4916 
4917 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4918 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4919 
4920 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4921 			    "nv_intr_pkt_pio: request sense done"));
4922 		}
4923 
4924 		return;
4925 	}
4926 
4927 	/*
4928 	 * Normal command processing
4929 	 */
4930 
4931 	if ((status & (SATA_STATUS_DRQ)) != 0) {
4932 		/* ATAPI protocol state - HP4: Transfer_Data */
4933 
4934 		/* read the byte count from the controller */
4935 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4936 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4937 
4938 		if (ctlr_count == 0) {
4939 			/* no data to transfer - some devices do this */
4940 
4941 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4942 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4943 
4944 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4945 			    "nv_intr_pkt_pio: done (no data)"));
4946 
4947 			return;
4948 		}
4949 
4950 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4951 
4952 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4953 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4954 
4955 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4956 		    "nv_intr_pkt_pio: byte_count 0x%x",
4957 		    nv_slotp->nvslot_byte_count));
4958 
4959 		/* transfer the data */
4960 
4961 		if (direction == SATA_DIR_READ) {
4962 			ddi_rep_get16(cmdhdl,
4963 			    (ushort_t *)nv_slotp->nvslot_v_addr,
4964 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4965 			    DDI_DEV_NO_AUTOINCR);
4966 
4967 			ctlr_count -= count;
4968 
4969 			if (ctlr_count > 0) {
				/* consume remaining bytes */
4971 
4972 				for (; ctlr_count > 0;
4973 				    ctlr_count -= 2)
4974 					(void) ddi_get16(cmdhdl,
4975 					    (ushort_t *)nvp->nvp_data);
4976 
4977 				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4978 				    "nv_intr_pkt_pio: bytes remained"));
4979 			}
4980 		} else {
4981 			ddi_rep_put16(cmdhdl,
4982 			    (ushort_t *)nv_slotp->nvslot_v_addr,
4983 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4984 			    DDI_DEV_NO_AUTOINCR);
4985 		}
4986 
4987 		nv_slotp->nvslot_v_addr += count;
4988 		nv_slotp->nvslot_byte_count -= count;
4989 
4990 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4991 		    "nv_intr_pkt_pio: transition to HP2"));
4992 	} else {
4993 		/* still in ATAPI state - HP2 */
4994 
4995 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4996 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4997 
4998 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4999 		    "nv_intr_pkt_pio: done"));
5000 	}
5001 }
5002 
5003 
5004 /*
5005  * ATA command, DMA data in/out
5006  */
5007 static void
5008 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5009 {
5010 	uchar_t status;
5011 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5012 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5013 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5014 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5015 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5016 	uchar_t	bmicx;
5017 	uchar_t bm_status;
5018 
5019 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5020 
5021 	/*
5022 	 * stop DMA engine.
5023 	 */
5024 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5025 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5026 
5027 	/*
5028 	 * get the status and clear the IRQ, and check for DMA error
5029 	 */
5030 	status = nv_get8(cmdhdl, nvp->nvp_status);
5031 
5032 	/*
5033 	 * check for drive errors
5034 	 */
5035 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5036 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5037 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5038 		(void) nv_bm_status_clear(nvp);
5039 
5040 		return;
5041 	}
5042 
5043 	bm_status = nv_bm_status_clear(nvp);
5044 
5045 	/*
5046 	 * check for bus master errors
5047 	 */
5048 	if (bm_status & BMISX_IDERR) {
5049 		spkt->satapkt_reason = SATA_PKT_RESET;   /* ? */
5050 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5051 		    nvp->nvp_altstatus);
5052 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5053 		nvp->nvp_state |= NV_PORT_RESET;
5054 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5055 		nv_reset(nvp);
5056 
5057 		return;
5058 	}
5059 
5060 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5061 }
5062 
5063 
5064 /*
5065  * Wait for a register of a controller to achieve a specific state.
5066  * To return normally, all the bits in the first sub-mask must be ON,
5067  * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return B_FALSE, else return B_TRUE.
5070  *
5071  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5072  * occur for the first 250 us, then switch over to a sleeping wait.
5073  *
5074  */
5075 int
5076 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5077     int type_wait)
5078 {
5079 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5080 	hrtime_t end, cur, start_sleep, start;
5081 	int first_time = B_TRUE;
5082 	ushort_t val;
5083 
5084 	for (;;) {
5085 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5086 
5087 		if ((val & onbits) == onbits && (val & offbits) == 0) {
5088 
5089 			return (B_TRUE);
5090 		}
5091 
5092 		cur = gethrtime();
5093 
5094 		/*
5095 		 * store the start time and calculate the end
5096 		 * time.  also calculate "start_sleep" which is
5097 		 * the point after which the driver will stop busy
5098 		 * waiting and change to sleep waiting.
5099 		 */
5100 		if (first_time) {
5101 			first_time = B_FALSE;
5102 			/*
5103 			 * start and end are in nanoseconds
5104 			 */
5105 			start = cur;
5106 			end = start + timeout_usec * 1000;
5107 			/*
5108 			 * add 1 ms to start
5109 			 */
5110 			start_sleep =  start + 250000;
5111 
5112 			if (servicing_interrupt()) {
5113 				type_wait = NV_NOSLEEP;
5114 			}
5115 		}
5116 
5117 		if (cur > end) {
5118 
5119 			break;
5120 		}
5121 
5122 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5123 #if ! defined(__lock_lint)
5124 			delay(1);
5125 #endif
5126 		} else {
5127 			drv_usecwait(nv_usec_delay);
5128 		}
5129 	}
5130 
5131 	return (B_FALSE);
5132 }
5133 
5134 
5135 /*
5136  * This is a slightly more complicated version that checks
 * for error conditions and bails out rather than looping
5138  * until the timeout is exceeded.
5139  *
5140  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5141  * occur for the first 250 us, then switch over to a sleeping wait.
5142  */
5143 int
5144 nv_wait3(
5145 	nv_port_t	*nvp,
5146 	uchar_t		onbits1,
5147 	uchar_t		offbits1,
5148 	uchar_t		failure_onbits2,
5149 	uchar_t		failure_offbits2,
5150 	uchar_t		failure_onbits3,
5151 	uchar_t		failure_offbits3,
5152 	uint_t		timeout_usec,
5153 	int		type_wait)
5154 {
5155 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5156 	hrtime_t end, cur, start_sleep, start;
5157 	int first_time = B_TRUE;
5158 	ushort_t val;
5159 
5160 	for (;;) {
5161 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5162 
5163 		/*
5164 		 * check for expected condition
5165 		 */
5166 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5167 
5168 			return (B_TRUE);
5169 		}
5170 
5171 		/*
5172 		 * check for error conditions
5173 		 */
5174 		if ((val & failure_onbits2) == failure_onbits2 &&
5175 		    (val & failure_offbits2) == 0) {
5176 
5177 			return (B_FALSE);
5178 		}
5179 
5180 		if ((val & failure_onbits3) == failure_onbits3 &&
5181 		    (val & failure_offbits3) == 0) {
5182 
5183 			return (B_FALSE);
5184 		}
5185 
5186 		/*
5187 		 * store the start time and calculate the end
5188 		 * time.  also calculate "start_sleep" which is
5189 		 * the point after which the driver will stop busy
5190 		 * waiting and change to sleep waiting.
5191 		 */
5192 		if (first_time) {
5193 			first_time = B_FALSE;
5194 			/*
5195 			 * start and end are in nanoseconds
5196 			 */
5197 			cur = start = gethrtime();
5198 			end = start + timeout_usec * 1000;
5199 			/*
5200 			 * add 1 ms to start
5201 			 */
5202 			start_sleep =  start + 250000;
5203 
5204 			if (servicing_interrupt()) {
5205 				type_wait = NV_NOSLEEP;
5206 			}
5207 		} else {
5208 			cur = gethrtime();
5209 		}
5210 
5211 		if (cur > end) {
5212 
5213 			break;
5214 		}
5215 
5216 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5217 #if ! defined(__lock_lint)
5218 			delay(1);
5219 #endif
5220 		} else {
5221 			drv_usecwait(nv_usec_delay);
5222 		}
5223 	}
5224 
5225 	return (B_FALSE);
5226 }
5227 
5228 
5229 /*
5230  * nv_port_state_change() reports the state of the port to the
5231  * sata module by calling sata_hba_event_notify().  This
 * function is called any time the state of the port is changed.
5233  */
5234 static void
5235 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5236 {
5237 	sata_device_t sd;
5238 
5239 	NVLOG((NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5240 	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5241 	    "time %ld (ticks)", event, addr_type, state, ddi_get_lbolt()));
5242 
5243 	bzero((void *)&sd, sizeof (sata_device_t));
5244 	sd.satadev_rev = SATA_DEVICE_REV;
5245 	nv_copy_registers(nvp, &sd, NULL);
5246 
5247 	/*
5248 	 * When NCQ is implemented sactive and snotific field need to be
5249 	 * updated.
5250 	 */
5251 	sd.satadev_addr.cport = nvp->nvp_port_num;
5252 	sd.satadev_addr.qual = addr_type;
5253 	sd.satadev_state = state;
5254 
5255 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5256 }
5257 
5258 
5259 
5260 /*
5261  * Monitor reset progress and signature gathering.
5262  * This function may loop, so it should not be called from interrupt
5263  * context.
5264  *
5265  * Entered with nvp mutex held.
5266  */
5267 static void
5268 nv_monitor_reset(nv_port_t *nvp)
5269 {
5270 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5271 	uint32_t sstatus;
5272 	int send_notification = B_FALSE;
5273 	uint8_t dev_type;
5274 
5275 	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5276 
5277 	/*
5278 	 * We do not know here the reason for port reset.
5279 	 * Check the link status. The link needs to be active before
5280 	 * we can check the link's status.
5281 	 */
5282 	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5283 	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5284 		/*
5285 		 * Either link is not active or there is no device
5286 		 * If the link remains down for more than NV_LINK_DOWN_TIMEOUT
5287 		 * (milliseconds), abort signature acquisition and complete
5288 		 * reset processing.
5289 		 * The link will go down when COMRESET is sent by nv_reset(),
5290 		 * so it is practically nvp_reset_time milliseconds.
5291 		 */
5292 
5293 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5294 		    NV_LINK_DOWN_TIMEOUT) {
5295 			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5296 			    "nv_monitor_reset: no link - ending signature "
5297 			    "acquisition; time after reset %ldms",
5298 			    TICK_TO_MSEC(ddi_get_lbolt() -
5299 			    nvp->nvp_reset_time)));
5300 		}
5301 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5302 		    NV_PORT_PROBE | NV_PORT_HOTPLUG_DELAY);
5303 		/*
5304 		 * Else, if the link was lost (i.e. was present before)
5305 		 * the controller should generate a 'remove' interrupt
5306 		 * that will cause the appropriate event notification.
5307 		 */
5308 		return;
5309 	}
5310 
5311 	NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5312 	    "nv_monitor_reset: link up after reset; time %ldms",
5313 	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time)));
5314 
5315 sig_read:
5316 	if (nvp->nvp_signature != 0) {
5317 		/*
5318 		 * The link is up. The signature was acquired before (device
5319 		 * was present).
5320 		 * But we may need to wait for the signature (D2H FIS) before
5321 		 * accessing the drive.
5322 		 */
5323 		if (nv_wait_for_signature != 0) {
5324 			uint32_t old_signature;
5325 			uint8_t old_type;
5326 
5327 			old_signature = nvp->nvp_signature;
5328 			old_type = nvp->nvp_type;
5329 			nvp->nvp_signature = 0;
5330 			nv_read_signature(nvp);
5331 			if (nvp->nvp_signature == 0) {
5332 				nvp->nvp_signature = old_signature;
5333 				nvp->nvp_type = old_type;
5334 
5335 #ifdef NV_DEBUG
5336 				/* FOR DEBUGGING */
5337 				if (nv_wait_here_forever) {
5338 					drv_usecwait(1000);
5339 					goto sig_read;
5340 				}
5341 #endif
5342 				/*
5343 				 * Wait, but not endlessly.
5344 				 */
5345 				if (TICK_TO_MSEC(ddi_get_lbolt() -
5346 				    nvp->nvp_reset_time) <
5347 				    nv_sig_acquisition_time) {
5348 					drv_usecwait(1000);
5349 					goto sig_read;
5350 				} else if (!(nvp->nvp_state &
5351 				    NV_PORT_RESET_RETRY)) {
5352 					/*
5353 					 * Retry reset.
5354 					 */
5355 					NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5356 					    "nv_monitor_reset: retrying reset "
5357 					    "time after first reset: %ldms",
5358 					    TICK_TO_MSEC(ddi_get_lbolt() -
5359 					    nvp->nvp_reset_time)));
5360 					nvp->nvp_state |= NV_PORT_RESET_RETRY;
5361 					nv_reset(nvp);
5362 					goto sig_read;
5363 				}
5364 
5365 				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5366 				    "nv_monitor_reset: terminating signature "
5367 				    "acquisition (1); time after reset: %ldms",
5368 				    TICK_TO_MSEC(ddi_get_lbolt() -
5369 				    nvp->nvp_reset_time)));
5370 			} else {
5371 				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5372 				    "nv_monitor_reset: signature acquired; "
5373 				    "time after reset: %ldms",
5374 				    TICK_TO_MSEC(ddi_get_lbolt() -
5375 				    nvp->nvp_reset_time)));
5376 			}
5377 		}
5378 		/*
5379 		 * Clear reset state, set device reset recovery state
5380 		 */
		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
		    NV_PORT_PROBE);
5383 		nvp->nvp_state |= NV_PORT_RESTORE;
5384 
5385 		/*
5386 		 * Need to send reset event notification
5387 		 */
5388 		send_notification = B_TRUE;
5389 	} else {
5390 		/*
5391 		 * The link is up. The signature was not acquired before.
5392 		 * We can try to fetch a device signature.
5393 		 */
5394 		dev_type = nvp->nvp_type;
5395 
5396 acquire_signature:
5397 		nv_read_signature(nvp);
5398 		if (nvp->nvp_signature != 0) {
5399 			/*
5400 			 * Got device signature.
5401 			 */
5402 			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5403 			    "nv_monitor_reset: signature acquired; "
5404 			    "time after reset: %ldms",
5405 			    TICK_TO_MSEC(ddi_get_lbolt() -
5406 			    nvp->nvp_reset_time)));
5407 
5408 			/* Clear internal reset state */
5409 			nvp->nvp_state &=
5410 			    ~(NV_PORT_RESET | NV_PORT_RESET_RETRY);
5411 
5412 			if (dev_type != SATA_DTYPE_NONE) {
5413 				/*
5414 				 * We acquired the signature for a
5415 				 * pre-existing device that was not identified
				 * before and was reset.
5417 				 * Need to enter the device reset recovery
5418 				 * state and to send the reset notification.
5419 				 */
5420 				nvp->nvp_state |= NV_PORT_RESTORE;
5421 				send_notification = B_TRUE;
5422 			} else {
5423 				/*
				 * Else, we acquired the signature because a new
5425 				 * device was attached (the driver attach or
5426 				 * a hot-plugged device). There is no need to
5427 				 * enter the device reset recovery state or to
5428 				 * send the reset notification, but we may need
5429 				 * to send a device attached notification.
5430 				 */
5431 				if (nvp->nvp_state & NV_PORT_PROBE) {
5432 					nv_port_state_change(nvp,
5433 					    SATA_EVNT_DEVICE_ATTACHED,
5434 					    SATA_ADDR_CPORT, 0);
5435 					nvp->nvp_state &= ~NV_PORT_PROBE;
5436 				}
5437 			}
5438 		} else {
5439 			if (TICK_TO_MSEC(ddi_get_lbolt() -
5440 			    nvp->nvp_reset_time) < nv_sig_acquisition_time) {
5441 				drv_usecwait(1000);
5442 				goto acquire_signature;
5443 			} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
5444 				/*
5445 				 * Some drives may require additional
5446 				 * reset(s) to get a valid signature
5447 				 * (indicating that the drive is ready).
5448 				 * If a drive was not just powered
5449 				 * up, the signature should be available
5450 				 * within few hundred milliseconds
5451 				 * after reset.  Therefore, if more than
5452 				 * NV_SIG_ACQUISITION_TIME has elapsed
5453 				 * while waiting for a signature, reset
5454 				 * device again.
5455 				 */
5456 				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5457 				    "nv_monitor_reset: retrying reset "
5458 				    "time after first reset: %ldms",
5459 				    TICK_TO_MSEC(ddi_get_lbolt() -
5460 				    nvp->nvp_reset_time)));
5461 				nvp->nvp_state |= NV_PORT_RESET_RETRY;
5462 				nv_reset(nvp);
5463 				drv_usecwait(1000);
5464 				goto acquire_signature;
5465 			}
5466 			/*
5467 			 * Terminating signature acquisition.
5468 			 * Hopefully, the drive is ready.
5469 			 * The SATA module can deal with this as long as it
5470 			 * knows that some device is attached and a device
5471 			 * responds to commands.
5472 			 */
5473 			if (!(nvp->nvp_state & NV_PORT_PROBE)) {
5474 				send_notification = B_TRUE;
5475 			}
5476 			nvp->nvp_state &= ~(NV_PORT_RESET |
5477 			    NV_PORT_RESET_RETRY);
5478 			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5479 			if (nvp->nvp_state & NV_PORT_PROBE) {
5480 				nv_port_state_change(nvp,
5481 				    SATA_EVNT_DEVICE_ATTACHED,
5482 				    SATA_ADDR_CPORT, 0);
5483 				nvp->nvp_state &= ~NV_PORT_PROBE;
5484 			}
5485 			nvp->nvp_type = dev_type;
5486 			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5487 			    "nv_monitor_reset: terminating signature "
5488 			    "acquisition (2); time after reset: %ldms",
5489 			    TICK_TO_MSEC(ddi_get_lbolt() -
5490 			    nvp->nvp_reset_time)));
5491 		}
5492 	}
5493 	if (send_notification) {
5494 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5495 		    SATA_ADDR_DCPORT,
5496 		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5497 	}
5498 }
5499 
5500 
5501 /*
5502  * Send a hotplug (add device) notification at the appropriate time after
5503  * hotplug detection.
 * Relies on nvp_reset_time being set at hotplug detection time.
5505  * Called only from nv_timeout when NV_PORT_HOTPLUG_DELAY flag is set in
5506  * the nvp_state.
5507  */
5508 static void
5509 nv_delay_hotplug_notification(nv_port_t *nvp)
5510 {
5511 
5512 	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5513 	    nv_hotplug_delay) {
5514 		NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5515 		    "nv_delay_hotplug_notification: notifying framework after "
5516 		    "%dms delay", TICK_TO_MSEC(ddi_get_lbolt() -
5517 		    nvp->nvp_reset_time)));
5518 		nvp->nvp_state &= ~NV_PORT_HOTPLUG_DELAY;
5519 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5520 		    SATA_ADDR_CPORT, 0);
5521 	}
5522 }
5523 
5524 /*
5525  * timeout processing:
5526  *
5527  * Check if any packets have crossed a timeout threshold.  If so,
5528  * abort the packet.  This function is not NCQ-aware.
5529  *
5530  * If reset was invoked, call reset monitoring function.
5531  *
5532  * Timeout frequency may be lower for checking packet timeout (1s)
5533  * and higher for reset monitoring (1ms)
5534  *
5535  */
5536 static void
5537 nv_timeout(void *arg)
5538 {
5539 	nv_port_t *nvp = arg;
5540 	nv_slot_t *nv_slotp;
5541 	int next_timeout = NV_ONE_SEC;	/* Default */
5542 	uint16_t int_status;
5543 	uint8_t status, bmstatus;
5544 	static int intr_warn_once = 0;
5545 
5546 	ASSERT(nvp != NULL);
5547 
5548 	mutex_enter(&nvp->nvp_mutex);
5549 	nvp->nvp_timeout_id = 0;
5550 
5551 	/*
5552 	 * If the port is not in the init state, ignore it.
5553 	 */
5554 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5555 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5556 		    "nv_timeout: port uninitialized"));
5557 		next_timeout = 0;
5558 
5559 		goto finished;
5560 	}
5561 
5562 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
5563 		nv_monitor_reset(nvp);
5564 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5565 
5566 		goto finished;
5567 	}
5568 
5569 	if ((nvp->nvp_state & NV_PORT_HOTPLUG_DELAY) != 0) {
5570 		nv_delay_hotplug_notification(nvp);
5571 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5572 
5573 		goto finished;
5574 	}
5575 
5576 	/*
5577 	 * Not yet NCQ-aware - there is only one command active.
5578 	 */
5579 	nv_slotp = &(nvp->nvp_slot[0]);
5580 
5581 	/*
5582 	 * perform timeout checking and processing only if there is an
5583 	 * active packet on the port
5584 	 */
5585 	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5586 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5587 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5588 		uint8_t cmd = satacmd->satacmd_cmd_reg;
5589 		uint64_t lba;
5590 
5591 #if ! defined(__lock_lint) && defined(DEBUG)
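		/*
		 * Reassemble the 48-bit LBA from the taskfile bytes for the
		 * debug message below.
		 */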
5592 
5593 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5594 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5595 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5596 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5597 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5598 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5599 #endif
5600 
5601 		/*
5602 		 * timeout not needed if there is a polling thread
5603 		 */
5604 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5605 			next_timeout = 0;
5606 
5607 			goto finished;
5608 		}
5609 
5610 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5611 		    spkt->satapkt_time) {
5612 
5613 			uint32_t serr = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
			    nvp->nvp_serror);
5615 
			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: aborting: "
			    "nvslot_stime: %ld max ticks till timeout: "
			    "%ld cur_time: %ld cmd=%x lba=%llu",
			    nv_slotp->nvslot_stime,
			    drv_usectohz(MICROSEC *
			    spkt->satapkt_time), ddi_get_lbolt(),
			    cmd, (unsigned long long)lba));
5624 
5625 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5626 			    "nv_timeout: SError at timeout: 0x%x", serr));
5627 
5628 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5629 			    "nv_timeout: previous cmd=%x",
5630 			    nvp->nvp_previous_cmd));
5631 
5632 			if (nvp->nvp_mcp5x_int_status != NULL) {
5633 				status = nv_get8(nvp->nvp_ctl_hdl,
5634 				    nvp->nvp_altstatus);
5635 				bmstatus = nv_get8(nvp->nvp_bm_hdl,
5636 				    nvp->nvp_bmisx);
5637 				int_status = nv_get16(
5638 				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5639 				    nvp->nvp_mcp5x_int_status);
5640 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5641 				    "nv_timeout: altstatus %x, bmicx %x, "
5642 				    "int_status %X", status, bmstatus,
5643 				    int_status));
5644 
5645 				if (int_status & MCP5X_INT_COMPLETE) {
5646 					/*
5647 					 * Completion interrupt was missed!
5648 					 * Issue warning message once
5649 					 */
5650 					if (!intr_warn_once) {
5651 						cmn_err(CE_WARN,
5652 						    "nv_sata: missing command "
5653 						    "completion interrupt(s)!");
5654 						intr_warn_once = 1;
5655 					}
5656 					NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp,
5657 					    nvp, "timeout detected with "
5658 					    "interrupt ready - calling "
5659 					    "int directly"));
5660 					mutex_exit(&nvp->nvp_mutex);
5661 					mcp5x_intr_port(nvp);
5662 					mutex_enter(&nvp->nvp_mutex);
5663 				} else {
5664 					/*
5665 					 * True timeout and not a missing
5666 					 * interrupt.
5667 					 */
5668 					(void) nv_abort_active(nvp, spkt,
5669 					    SATA_PKT_TIMEOUT, B_TRUE);
5670 				}
5671 			} else {
5672 				(void) nv_abort_active(nvp, spkt,
5673 				    SATA_PKT_TIMEOUT, B_TRUE);
5674 			}
5675 
5676 		} else {
5677 #ifdef NV_DEBUG
5678 			if (nv_debug_flags & NVDBG_VERBOSE) {
5679 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5680 				    "nv_timeout:"
5681 				    " still in use so restarting timeout"));
5682 			}
5683 #endif
5684 			next_timeout = NV_ONE_SEC;
5685 		}
5686 	} else {
5687 		/*
5688 		 * there was no active packet, so do not re-enable timeout
5689 		 */
5690 		next_timeout = 0;
5691 #ifdef NV_DEBUG
5692 		if (nv_debug_flags & NVDBG_VERBOSE) {
5693 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5694 			    "nv_timeout: no active packet so not re-arming "
5695 			    "timeout"));
5696 		}
5697 #endif
5698 	}
5699 
5700 finished:
5701 	if (next_timeout != 0) {
5702 		nv_setup_timeout(nvp, next_timeout);
5703 	}
5704 	mutex_exit(&nvp->nvp_mutex);
5705 }
5706 
5707 
5708 /*
5709  * enable or disable the 3 interrupt types the driver is
5710  * interested in: completion, add and remove.
5711  */
5712 static void
5713 ck804_set_intr(nv_port_t *nvp, int flag)
5714 {
5715 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5716 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5717 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5718 	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5719 	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5720 	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5721 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5722 
5723 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5724 		int_en = nv_get8(bar5_hdl,
5725 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5726 		int_en &= ~intr_bits[port];
5727 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5728 		    int_en);
5729 		return;
5730 	}
5731 
5732 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5733 
5734 	/*
5735 	 * controller level lock also required since access to an 8-bit
5736 	 * interrupt register is shared between both channels.
5737 	 */
5738 	mutex_enter(&nvc->nvc_mutex);
5739 
5740 	if (flag & NV_INTR_CLEAR_ALL) {
5741 		NVLOG((NVDBG_INTR, nvc, nvp,
5742 		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));
5743 
5744 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5745 		    (uint8_t *)(nvc->nvc_ck804_int_status));
5746 
5747 		if (intr_status & clear_all_bits[port]) {
5748 
5749 			nv_put8(nvc->nvc_bar_hdl[5],
5750 			    (uint8_t *)(nvc->nvc_ck804_int_status),
5751 			    clear_all_bits[port]);
5752 
5753 			NVLOG((NVDBG_INTR, nvc, nvp,
5754 			    "interrupt bits cleared %x",
5755 			    intr_status & clear_all_bits[port]));
5756 		}
5757 	}
5758 
5759 	if (flag & NV_INTR_DISABLE) {
5760 		NVLOG((NVDBG_INTR, nvc, nvp,
5761 		    "ck804_set_intr: NV_INTR_DISABLE"));
5762 		int_en = nv_get8(bar5_hdl,
5763 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5764 		int_en &= ~intr_bits[port];
5765 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5766 		    int_en);
5767 	}
5768 
5769 	if (flag & NV_INTR_ENABLE) {
5770 		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
5771 		int_en = nv_get8(bar5_hdl,
5772 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5773 		int_en |= intr_bits[port];
5774 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5775 		    int_en);
5776 	}
5777 
5778 	mutex_exit(&nvc->nvc_mutex);
5779 }
5780 
5781 
5782 /*
5783  * enable or disable the 3 interrupts the driver is interested in:
5784  * completion interrupt, hot add, and hot remove interrupt.
5785  */
5786 static void
5787 mcp5x_set_intr(nv_port_t *nvp, int flag)
5788 {
5789 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5790 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5791 	uint16_t intr_bits =
5792 	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5793 	uint16_t int_en;
5794 
5795 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5796 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5797 		int_en &= ~intr_bits;
5798 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5799 		return;
5800 	}
5801 
5802 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5803 
	NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x_set_intr: enter flag: %d", flag));
5805 
5806 	if (flag & NV_INTR_CLEAR_ALL) {
5807 		NVLOG((NVDBG_INTR, nvc, nvp,
5808 		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
5809 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5810 	}
5811 
5812 	if (flag & NV_INTR_ENABLE) {
5813 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
5814 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5815 		int_en |= intr_bits;
5816 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5817 	}
5818 
5819 	if (flag & NV_INTR_DISABLE) {
5820 		NVLOG((NVDBG_INTR, nvc, nvp,
5821 		    "mcp5x_set_intr: NV_INTR_DISABLE"));
5822 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5823 		int_en &= ~intr_bits;
5824 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5825 	}
5826 }
5827 
5828 
5829 static void
5830 nv_resume(nv_port_t *nvp)
5831 {
5832 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5833 
5834 	mutex_enter(&nvp->nvp_mutex);
5835 
5836 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5837 		mutex_exit(&nvp->nvp_mutex);
5838 
5839 		return;
5840 	}
5841 
5842 	/* Enable interrupt */
5843 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5844 
5845 	/*
5846 	 * Power may have been removed to the port and the
5847 	 * drive, and/or a drive may have been added or removed.
5848 	 * Force a reset which will cause a probe and re-establish
5849 	 * any state needed on the drive.
5850 	 */
5851 	nvp->nvp_state |= NV_PORT_RESET;
5852 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5853 	nv_reset(nvp);
5854 
5855 #ifdef SGPIO_SUPPORT
5856 	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5857 	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5858 #endif
5859 
5860 	mutex_exit(&nvp->nvp_mutex);
5861 }
5862 
5863 
5864 static void
5865 nv_suspend(nv_port_t *nvp)
5866 {
5867 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5868 
5869 	mutex_enter(&nvp->nvp_mutex);
5870 
5871 #ifdef SGPIO_SUPPORT
5872 	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5873 	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5874 #endif
5875 
5876 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5877 		mutex_exit(&nvp->nvp_mutex);
5878 
5879 		return;
5880 	}
5881 
5882 	/*
5883 	 * Stop the timeout handler.
5884 	 * (It will be restarted in nv_reset() during nv_resume().)
5885 	 */
5886 	if (nvp->nvp_timeout_id) {
5887 		(void) untimeout(nvp->nvp_timeout_id);
5888 		nvp->nvp_timeout_id = 0;
5889 	}
5890 
5891 	/* Disable interrupt */
5892 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5893 	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
5894 
5895 	mutex_exit(&nvp->nvp_mutex);
5896 }
5897 
5898 
5899 static void
5900 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5901 {
5902 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5903 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5904 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5905 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5906 	uchar_t status;
5907 	struct sata_cmd_flags flags;
5908 
5909 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5910 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5911 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5912 
5913 	if (spkt == NULL) {
5914 
5915 		return;
5916 	}
5917 
5918 	/*
5919 	 * in the error case, implicitly set the return of regs needed
5920 	 * for error handling.
5921 	 */
5922 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5923 	    nvp->nvp_altstatus);
5924 
5925 	flags = scmd->satacmd_flags;
5926 
5927 	if (status & SATA_STATUS_ERR) {
5928 		flags.sata_copy_out_lba_low_msb = B_TRUE;
5929 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5930 		flags.sata_copy_out_lba_high_msb = B_TRUE;
5931 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5932 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5933 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5934 		flags.sata_copy_out_error_reg = B_TRUE;
5935 		flags.sata_copy_out_sec_count_msb = B_TRUE;
5936 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5937 		scmd->satacmd_status_reg = status;
5938 	}
5939 
5940 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5941 
5942 		/*
5943 		 * set HOB so that high byte will be read
5944 		 */
5945 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5946 
5947 		/*
5948 		 * get the requested high bytes
5949 		 */
5950 		if (flags.sata_copy_out_sec_count_msb) {
5951 			scmd->satacmd_sec_count_msb =
5952 			    nv_get8(cmdhdl, nvp->nvp_count);
5953 		}
5954 
5955 		if (flags.sata_copy_out_lba_low_msb) {
5956 			scmd->satacmd_lba_low_msb =
5957 			    nv_get8(cmdhdl, nvp->nvp_sect);
5958 		}
5959 
5960 		if (flags.sata_copy_out_lba_mid_msb) {
5961 			scmd->satacmd_lba_mid_msb =
5962 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5963 		}
5964 
5965 		if (flags.sata_copy_out_lba_high_msb) {
5966 			scmd->satacmd_lba_high_msb =
5967 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5968 		}
5969 	}
5970 
5971 	/*
5972 	 * disable HOB so that low byte is read
5973 	 */
5974 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5975 
5976 	/*
5977 	 * get the requested low bytes
5978 	 */
5979 	if (flags.sata_copy_out_sec_count_lsb) {
5980 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5981 	}
5982 
5983 	if (flags.sata_copy_out_lba_low_lsb) {
5984 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5985 	}
5986 
5987 	if (flags.sata_copy_out_lba_mid_lsb) {
5988 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5989 	}
5990 
5991 	if (flags.sata_copy_out_lba_high_lsb) {
5992 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5993 	}
5994 
5995 	/*
5996 	 * get the device register if requested
5997 	 */
5998 	if (flags.sata_copy_out_device_reg) {
5999 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6000 	}
6001 
6002 	/*
6003 	 * get the error register if requested
6004 	 */
6005 	if (flags.sata_copy_out_error_reg) {
6006 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6007 	}
6008 }
6009 
6010 
6011 /*
 * Hot plug and remove interrupts can occur when the device is reset.  Just
 * masking the interrupt doesn't always work well because, if a
 * different interrupt arrives on the other port, the driver can still
 * end up checking the state of the other port and discover that the hot
 * interrupt flag is set even though it was masked.  Checking for recent
 * reset activity and then ignoring the interrupt turns out to be the
 * easiest approach.
6018  *
6019  * Entered with nvp mutex held.
6020  */
6021 static void
6022 nv_report_add_remove(nv_port_t *nvp, int flags)
6023 {
6024 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6025 	uint32_t sstatus;
6026 	int i;
6027 	clock_t nv_lbolt = ddi_get_lbolt();
6028 
6029 
6030 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() - "
6031 	    "time (ticks) %d", nv_lbolt));
6032 
6033 	/*
6034 	 * wait up to 1ms for sstatus to settle and reflect the true
6035 	 * status of the port.  Failure to do so can create confusion
6036 	 * in probe, where the incorrect sstatus value can still
6037 	 * persist.
6038 	 */
6039 	for (i = 0; i < 1000; i++) {
6040 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6041 
6042 		if ((flags == NV_PORT_HOTREMOVED) &&
6043 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
6044 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6045 			break;
6046 		}
6047 
6048 		if ((flags != NV_PORT_HOTREMOVED) &&
6049 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
6050 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6051 			break;
6052 		}
6053 		drv_usecwait(1);
6054 	}
6055 
6056 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6057 	    "sstatus took %d us for DEVPRE_PHYCOM to settle", i));
6058 
6059 	if (flags == NV_PORT_HOTREMOVED) {
6060 
6061 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
6062 		    B_FALSE);
6063 
6064 		/*
6065 		 * No device, no point of bothering with device reset
6066 		 */
6067 		nvp->nvp_type = SATA_DTYPE_NONE;
6068 		nvp->nvp_signature = 0;
6069 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
6070 		    NV_PORT_RESTORE);
6071 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6072 		    "nv_report_add_remove() hot removed"));
6073 		nv_port_state_change(nvp,
6074 		    SATA_EVNT_DEVICE_DETACHED,
6075 		    SATA_ADDR_CPORT, 0);
6076 
6077 	} else {
6078 		/*
6079 		 * This is a hot plug or link up indication
6080 		 * Now, re-check the link state - no link, no device
6081 		 */
6082 		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
6083 		    (SSTATUS_GET_DET(sstatus) == SSTATUS_DET_DEVPRE_PHYCOM)) {
6084 
6085 			if (nvp->nvp_type == SATA_DTYPE_NONE) {
6086 				/*
6087 				 * Real device attach - there was no device
6088 				 * attached to this port before this report
6089 				 */
6090 				NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6091 				    "nv_report_add_remove() new device hot"
6092 				    "plugged"));
6093 				nvp->nvp_reset_time = ddi_get_lbolt();
6094 				if (!(nvp->nvp_state &
6095 				    (NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
6096 
6097 					nvp->nvp_signature = 0;
6098 					if (nv_reset_after_hotplug != 0) {
6099 
6100 						/*
6101 						 * Send reset to obtain a device
6102 						 * signature
6103 						 */
6104 						nvp->nvp_state |=
6105 						    NV_PORT_RESET |
6106 						    NV_PORT_PROBE;
6107 						nv_reset(nvp);
6108 						NVLOG((NVDBG_HOT,
6109 						    nvp->nvp_ctlp, nvp,
6110 						    "nv_report_add_remove() "
6111 						    "resetting device"));
6112 					} else {
6113 						nvp->nvp_type =
6114 						    SATA_DTYPE_UNKNOWN;
6115 					}
6116 				}
6117 
6118 				if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6119 					if (nv_reset_after_hotplug == 0) {
6120 						/*
6121 						 * In case a hotplug interrupt
6122 						 * is generated right after a
6123 						 * link is up, delay reporting
6124 						 * a hotplug event to let the
						 * drive initialize and send a
						 * D2H FIS with its signature.
6128 						 * The timeout will issue an
6129 						 * event notification after
6130 						 * the NV_HOTPLUG_DELAY
6131 						 * milliseconds delay.
6132 						 */
6133 						nvp->nvp_state |=
6134 						    NV_PORT_HOTPLUG_DELAY;
6135 						nvp->nvp_type =
6136 						    SATA_DTYPE_UNKNOWN;
6137 						/*
6138 						 * Make sure timer is running.
6139 						 */
6140 						nv_setup_timeout(nvp,
6141 						    NV_ONE_MSEC);
6142 					} else {
6143 						nv_port_state_change(nvp,
6144 						    SATA_EVNT_DEVICE_ATTACHED,
6145 						    SATA_ADDR_CPORT, 0);
6146 					}
6147 				}
6148 				return;
6149 			}
6150 			/*
			 * Otherwise it is a bogus attach, indicating recovered
6152 			 * link loss. No real need to report it after-the-fact.
6153 			 * But we may keep some statistics, or notify the
6154 			 * sata module by reporting LINK_LOST/LINK_ESTABLISHED
6155 			 * events to keep track of such occurrences.
6156 			 * Anyhow, we may want to terminate signature
6157 			 * acquisition.
6158 			 */
6159 			NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6160 			    "nv_report_add_remove() ignoring plug interrupt "
6161 			    "- recovered link?"));
6162 
6163 			if (nvp->nvp_state &
6164 			    (NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
6165 				NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6166 				    "nv_report_add_remove() - "
6167 				    "time since last reset %dms",
6168 				    TICK_TO_MSEC(ddi_get_lbolt() -
6169 				    nvp->nvp_reset_time)));
6170 				/*
6171 				 * If the driver does not have to wait for
6172 				 * a signature, then terminate reset processing
6173 				 * now.
6174 				 */
6175 				if (nv_wait_for_signature == 0) {
6176 					NVLOG((NVDBG_RESET, nvp->nvp_ctlp,
					    nvp, "nv_report_add_remove() - "
					    "terminating signature acquisition"
6179 					    ", time after reset: %dms",
6180 					    TICK_TO_MSEC(ddi_get_lbolt() -
6181 					    nvp->nvp_reset_time)));
6182 
6183 					nvp->nvp_state &= ~(NV_PORT_RESET |
6184 					    NV_PORT_RESET_RETRY);
6185 
6186 					if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6187 						nvp->nvp_state |=
6188 						    NV_PORT_RESTORE;
6189 						nvp->nvp_state &=
6190 						    ~NV_PORT_PROBE;
6191 
6192 						/*
6193 						 * It is not the initial device
6194 						 * probing, so notify sata
6195 						 * module that device was
6196 						 * reset
6197 						 */
6198 						nv_port_state_change(nvp,
6199 						    SATA_EVNT_DEVICE_RESET,
6200 						    SATA_ADDR_DCPORT,
6201 						    SATA_DSTATE_RESET |
6202 						    SATA_DSTATE_PWR_ACTIVE);
6203 					}
6204 
6205 				}
6206 			}
6207 			return;
6208 		}
		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
		    " ignoring add dev interrupt - "
		    "link is down or no device!"));
6212 	}
6213 
6214 }
6215 
6216 /*
 * Get request sense data and stuff it into the command's sense buffer.
6218  * Start a request sense command in order to get sense data to insert
6219  * in the sata packet's rqsense buffer.  The command completion
6220  * processing is in nv_intr_pkt_pio.
6221  *
6222  * The sata framework provides a function to allocate and set-up a
 * request sense packet command. The reasons it is not used here are:
6224  * a) it cannot be called in an interrupt context and this function is
6225  *    called in an interrupt context.
6226  * b) it allocates DMA resources that are not used here because this is
6227  *    implemented using PIO.
6228  *
6229  * If, in the future, this is changed to use DMA, the sata framework should
6230  * be used to allocate and set-up the error retrieval (request sense)
6231  * command.
6232  */
6233 static int
6234 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6235 {
6236 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6237 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6238 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6239 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6240 
6241 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6242 	    "nv_start_rqsense_pio: start"));
6243 
6244 	/* clear the local request sense buffer before starting the command */
6245 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6246 
6247 	/* Write the request sense PACKET command */
6248 
6249 	/* select the drive */
6250 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6251 
6252 	/* make certain the drive selected */
6253 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6254 	    NV_SEC2USEC(5), 0) == B_FALSE) {
6255 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6256 		    "nv_start_rqsense_pio: drive select failed"));
6257 		return (NV_FAILURE);
6258 	}
6259 
6260 	/* set up the command */
6261 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6262 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6263 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6264 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6265 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6266 
6267 	/* initiate the command by writing the command register last */
6268 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6269 
6270 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6271 	NV_DELAY_NSEC(400);
6272 
6273 	/*
6274 	 * Wait for the device to indicate that it is ready for the command
6275 	 * ATAPI protocol state - HP0: Check_Status_A
6276 	 */
6277 
6278 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6279 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6280 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6281 	    4000000, 0) == B_FALSE) {
6282 		if (nv_get8(cmdhdl, nvp->nvp_status) &
6283 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6284 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6285 			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
6286 		} else {
6287 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6288 			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
6289 		}
6290 
6291 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6292 		nv_complete_io(nvp, spkt, 0);
6293 		nvp->nvp_state |= NV_PORT_RESET;
6294 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
6295 		nv_reset(nvp);
6296 
6297 		return (NV_FAILURE);
6298 	}
6299 
6300 	/*
6301 	 * Put the ATAPI command in the data register
6302 	 * ATAPI protocol state - HP1: Send_Packet
6303 	 */
6304 
6305 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6306 	    (ushort_t *)nvp->nvp_data,
6307 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6308 
6309 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6310 	    "nv_start_rqsense_pio: exiting into HP3"));
6311 
6312 	return (NV_SUCCESS);
6313 }
6314 
6315 /*
6316  * quiesce(9E) entry point.
6317  *
6318  * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
6321  *
6322  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6323  * DDI_FAILURE indicates an error condition and should almost never happen.
6324  */
6325 static int
6326 nv_quiesce(dev_info_t *dip)
6327 {
6328 	int port, instance = ddi_get_instance(dip);
6329 	nv_ctl_t *nvc;
6330 
6331 	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6332 		return (DDI_FAILURE);
6333 
6334 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6335 		nv_port_t *nvp = &(nvc->nvc_port[port]);
6336 		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6337 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6338 		uint32_t sctrl;
6339 
6340 		/*
6341 		 * Stop the controllers from generating interrupts.
6342 		 */
6343 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6344 
6345 		/*
6346 		 * clear signature registers
6347 		 */
6348 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6349 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6350 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6351 		nv_put8(cmdhdl, nvp->nvp_count, 0);
6352 
6353 		nvp->nvp_signature = 0;
6354 		nvp->nvp_type = 0;
6355 		nvp->nvp_state |= NV_PORT_RESET;
6356 		nvp->nvp_reset_time = ddi_get_lbolt();
6357 
6358 		/*
6359 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
6360 		 */
6361 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6362 
6363 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6364 		    sctrl | SCONTROL_DET_COMRESET);
6365 
6366 		/*
6367 		 * wait 1ms
6368 		 */
6369 		drv_usecwait(1000);
6370 
6371 		/*
6372 		 * de-assert reset in PHY
6373 		 */
6374 		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6375 	}
6376 
6377 	return (DDI_SUCCESS);
6378 }
6379 
6380 
6381 #ifdef SGPIO_SUPPORT
6382 /*
6383  * NVIDIA specific SGPIO LED support
6384  * Please refer to the NVIDIA documentation for additional details
6385  */
6386 
6387 /*
6388  * nv_sgp_led_init
6389  * Detect SGPIO support.  If present, initialize.
6390  */
6391 static void
6392 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6393 {
6394 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6395 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6396 	nv_sgp_cmn_t *cmn;	/* shared data structure */
6397 	int i;
6398 	char tqname[SGPIO_TQ_NAME_LEN];
6399 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6400 
6401 	/*
6402 	 * Initialize with appropriately invalid values in case this function
6403 	 * exits without initializing SGPIO (for example, there is no SGPIO
6404 	 * support).
6405 	 */
6406 	nvc->nvc_sgp_csr = 0;
6407 	nvc->nvc_sgp_cbp = NULL;
6408 	nvc->nvc_sgp_cmn = NULL;
6409 
6410 	/*
6411 	 * Only try to initialize SGPIO LED support if this property
6412 	 * indicates it should be.
6413 	 */
6414 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6415 	    "enable-sgpio-leds", 0) != 1)
6416 		return;
6417 
6418 	/*
6419 	 * CK804 can pass the sgpio_detect test even though it does not support
6420 	 * SGPIO, so don't even look at a CK804.
6421 	 */
6422 	if (nvc->nvc_mcp5x_flag != B_TRUE)
6423 		return;
6424 
6425 	/*
6426 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6427 	 * However, the current implementation only supports 4 drives.
	 * With two drives per controller, that means only the first two
	 * controllers need to be considered.
6430 	 */
6431 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6432 		return;
6433 
6434 	/* confirm that the SGPIO registers are there */
6435 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6436 		NVLOG((NVDBG_INIT, nvc, NULL,
6437 		    "SGPIO registers not detected"));
6438 		return;
6439 	}
6440 
6441 	/* save off the SGPIO_CSR I/O address */
6442 	nvc->nvc_sgp_csr = csrp;
6443 
6444 	/* map in Control Block */
6445 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6446 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6447 
6448 	/* initialize the SGPIO h/w */
6449 	if (nv_sgp_init(nvc) == NV_FAILURE) {
6450 		nv_cmn_err(CE_WARN, nvc, NULL,
6451 		    "!Unable to initialize SGPIO");
6452 	}
6453 
6454 	/*
6455 	 * Initialize the shared space for this instance.  This could
6456 	 * involve allocating the space, saving a pointer to the space
6457 	 * and starting the taskq that actually turns the LEDs on and off.
6458 	 * Or, it could involve just getting the pointer to the already
6459 	 * allocated space.
6460 	 */
6461 
6462 	mutex_enter(&nv_sgp_c2c_mutex);
6463 
6464 	/* try and find our CBP in the mapping table */
6465 	cmn = NULL;
6466 	for (i = 0; i < NV_MAX_CBPS; i++) {
6467 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6468 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6469 			break;
6470 		}
6471 
6472 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6473 			break;
6474 	}
6475 
6476 	if (i >= NV_MAX_CBPS) {
6477 		/*
6478 		 * CBP to shared space mapping table is full
6479 		 */
6480 		nvc->nvc_sgp_cmn = NULL;
6481 		nv_cmn_err(CE_WARN, nvc, NULL,
6482 		    "!LED handling not initialized - too many controllers");
6483 	} else if (cmn == NULL) {
6484 		/*
6485 		 * Allocate the shared space, point the SGPIO scratch register
6486 		 * at it and start the led update taskq.
6487 		 */
6488 
6489 		/* allocate shared space */
6490 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6491 		    KM_SLEEP);
6492 		if (cmn == NULL) {
6493 			nv_cmn_err(CE_WARN, nvc, NULL,
6494 			    "!Failed to allocate shared data");
6495 			return;
6496 		}
6497 
6498 		nvc->nvc_sgp_cmn = cmn;
6499 
6500 		/* initialize the shared data structure */
6501 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6502 		cmn->nvs_connected = 0;
6503 		cmn->nvs_activity = 0;
6504 		cmn->nvs_cbp = cbp;
6505 
6506 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6507 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6508 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6509 
6510 		/* put the address in the SGPIO scratch register */
6511 #if defined(__amd64)
6512 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6513 #else
6514 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6515 #endif
6516 
6517 		/* add an entry to the cbp to cmn mapping table */
6518 
6519 		/* i should be the next available table position */
6520 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6521 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6522 
6523 		/* start the activity LED taskq */
6524 
6525 		/*
6526 		 * The taskq name should be unique and the time
6527 		 */
6528 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6529 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6530 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6531 		    TASKQ_DEFAULTPRI, 0);
6532 		if (cmn->nvs_taskq == NULL) {
6533 			cmn->nvs_taskq_delay = 0;
6534 			nv_cmn_err(CE_WARN, nvc, NULL,
6535 			    "!Failed to start activity LED taskq");
6536 		} else {
6537 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6538 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6539 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6540 		}
6541 	} else {
6542 		nvc->nvc_sgp_cmn = cmn;
6543 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6544 	}
6545 
6546 	mutex_exit(&nv_sgp_c2c_mutex);
6547 }
6548 
6549 /*
6550  * nv_sgp_detect
6551  * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6552  * report back whether both were readable.
6553  */
6554 static int
6555 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6556     uint32_t *cbpp)
6557 {
6558 	/* get the SGPIO_CSRP */
6559 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6560 	if (*csrpp == 0) {
6561 		return (NV_FAILURE);
6562 	}
6563 
6564 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6565 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6566 	if (*cbpp == 0) {
6567 		return (NV_FAILURE);
6568 	}
6569 
6570 	/* SGPIO_CBP is good, so we must support SGPIO */
6571 	return (NV_SUCCESS);
6572 }
6573 
6574 /*
6575  * nv_sgp_init
6576  * Initialize SGPIO.
6577  * The initialization process is described by NVIDIA, but the hardware does
6578  * not always behave as documented, so several steps have been changed and/or
6579  * omitted.
6580  */
6581 static int
6582 nv_sgp_init(nv_ctl_t *nvc)
6583 {
6584 	int seq;
6585 	int rval = NV_SUCCESS;
6586 	hrtime_t start, end;
6587 	uint32_t cmd;
6588 	uint32_t status;
6589 	int drive_count;
6590 
6591 	status = nv_sgp_csr_read(nvc);
6592 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6593 		/* SGPIO logic is in reset state and requires initialization */
6594 
6595 		/* noting the Sequence field value */
6596 		seq = SGPIO_CSR_SEQ(status);
6597 
6598 		/* issue SGPIO_CMD_READ_PARAMS command */
6599 		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6600 		nv_sgp_csr_write(nvc, cmd);
6601 
6602 		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6603 
6604 		/* poll for command completion */
6605 		start = gethrtime();
6606 		end = start + NV_SGP_CMD_TIMEOUT;
6607 		for (;;) {
6608 			status = nv_sgp_csr_read(nvc);
6609 
6610 			/* break on error */
6611 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6612 				NVLOG((NVDBG_ALWAYS, nvc, NULL,
6613 				    "Command error during initialization"));
6614 				rval = NV_FAILURE;
6615 				break;
6616 			}
6617 
6618 			/* command processing is taking place */
6619 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6620 				if (SGPIO_CSR_SEQ(status) != seq) {
6621 					NVLOG((NVDBG_ALWAYS, nvc, NULL,
6622 					    "Sequence number change error"));
6623 				}
6624 
6625 				break;
6626 			}
6627 
6628 			/* if completion not detected in 2000ms ... */
6629 
6630 			if (gethrtime() > end)
6631 				break;
6632 
6633 			/* wait 400 ns before checking again */
6634 			NV_DELAY_NSEC(400);
6635 		}
6636 	}
6637 
6638 	if (rval == NV_FAILURE)
6639 		return (rval);
6640 
6641 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6642 		NVLOG((NVDBG_ALWAYS, nvc, NULL,
6643 		    "SGPIO logic not operational after init - state %d",
6644 		    SGPIO_CSR_SSTAT(status)));
6645 		/*
6646 		 * Should return (NV_FAILURE) but the hardware can be
6647 		 * operational even if the SGPIO Status does not indicate
6648 		 * this.
6649 		 */
6650 	}
6651 
6652 	/*
6653 	 * NVIDIA recommends reading the supported drive count even
6654 	 * though they also indicate that it is always 4 at this time.
6655 	 */
6656 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6657 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6658 		NVLOG((NVDBG_INIT, nvc, NULL,
6659 		    "SGPIO reported undocumented drive count - %d",
6660 		    drive_count));
6661 	}
6662 
6663 	NVLOG((NVDBG_INIT, nvc, NULL,
6664 	    "initialized ctlr: %d csr: 0x%08x",
6665 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
6666 
6667 	return (rval);
6668 }
6669 
6670 static int
6671 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6672 {
6673 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6674 
6675 	if (cmn == NULL)
6676 		return (NV_FAILURE);
6677 
6678 	mutex_enter(&cmn->nvs_slock);
6679 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6680 	mutex_exit(&cmn->nvs_slock);
6681 
6682 	return (NV_SUCCESS);
6683 }
6684 
6685 /*
6686  * nv_sgp_csr_read
6687  * This is just a 32-bit port read from the value that was obtained from the
6688  * PCI config space.
6689  *
 * XXX It was advised to use the in[bwl] functions for this, even though
 * they are obsolete interfaces.
6692  */
6693 static int
6694 nv_sgp_csr_read(nv_ctl_t *nvc)
6695 {
6696 	return (inl(nvc->nvc_sgp_csr));
6697 }
6698 
6699 /*
6700  * nv_sgp_csr_write
6701  * This is just a 32-bit I/O port write.  The port number was obtained from
6702  * the PCI config space.
6703  *
 * XXX It was advised to use the out[bwl] functions for this, even though
 * they are obsolete interfaces.
6706  */
6707 static void
6708 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6709 {
6710 	outl(nvc->nvc_sgp_csr, val);
6711 }
6712 
6713 /*
6714  * nv_sgp_write_data
6715  * Cause SGPIO to send Control Block data
6716  */
6717 static int
6718 nv_sgp_write_data(nv_ctl_t *nvc)
6719 {
6720 	hrtime_t start, end;
6721 	uint32_t status;
6722 	uint32_t cmd;
6723 
6724 	/* issue command */
6725 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6726 	nv_sgp_csr_write(nvc, cmd);
6727 
6728 	/* poll for completion */
6729 	start = gethrtime();
6730 	end = start + NV_SGP_CMD_TIMEOUT;
6731 	for (;;) {
6732 		status = nv_sgp_csr_read(nvc);
6733 
6734 		/* break on error completion */
6735 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6736 			break;
6737 
6738 		/* break on successful completion */
6739 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6740 			break;
6741 
6742 		/* Wait 400 ns and try again */
6743 		NV_DELAY_NSEC(400);
6744 
6745 		if (gethrtime() > end)
6746 			break;
6747 	}
6748 
6749 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6750 		return (NV_SUCCESS);
6751 
6752 	return (NV_FAILURE);
6753 }
6754 
6755 /*
6756  * nv_sgp_activity_led_ctl
6757  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6758  * see if any of the activity LEDs need to be changed.
6759  */
6760 static void
6761 nv_sgp_activity_led_ctl(void *arg)
6762 {
6763 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6764 	nv_sgp_cmn_t *cmn;
6765 	volatile nv_sgp_cb_t *cbp;
6766 	clock_t ticks;
6767 	uint8_t drv_leds;
6768 	uint32_t old_leds;
6769 	uint32_t new_led_state;
6770 	int i;
6771 
6772 	cmn = nvc->nvc_sgp_cmn;
6773 	cbp = nvc->nvc_sgp_cbp;
6774 
6775 	do {
6776 		/* save off the old state of all of the LEDs */
6777 		old_leds = cbp->sgpio0_tr;
6778 
6779 		DTRACE_PROBE3(sgpio__activity__state,
6780 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6781 		    int, old_leds);
6782 
6783 		new_led_state = 0;
6784 
6785 		/* for each drive */
6786 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6787 
6788 			/* get the current state of the LEDs for the drive */
6789 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6790 
6791 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6792 				/* if not connected, turn off activity */
6793 				drv_leds &= ~TR_ACTIVE_MASK;
6794 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6795 
6796 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6797 				new_led_state |=
6798 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6799 
6800 				continue;
6801 			}
6802 
6803 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6804 				/* connected, but not active */
6805 				drv_leds &= ~TR_ACTIVE_MASK;
6806 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6807 
6808 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6809 				new_led_state |=
6810 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6811 
6812 				continue;
6813 			}
6814 
6815 			/* connected and active */
6816 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6817 				/* was enabled, so disable */
6818 				drv_leds &= ~TR_ACTIVE_MASK;
6819 				drv_leds |=
6820 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6821 
6822 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6823 				new_led_state |=
6824 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6825 			} else {
6826 				/* was disabled, so enable */
6827 				drv_leds &= ~TR_ACTIVE_MASK;
6828 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6829 
6830 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6831 				new_led_state |=
6832 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6833 			}
6834 
6835 			/*
			 * Clear the activity bit.  If there is drive
			 * activity again within the
6838 			 * loop interval (now 1/16 second), nvs_activity
6839 			 * will be reset and the "connected and active"
6840 			 * condition above will cause the LED to blink
6841 			 * off and on at the loop interval rate.  The
6842 			 * rate may be increased (interval shortened) as
6843 			 * long as it is not more than 1/30 second.
6844 			 */
6845 			mutex_enter(&cmn->nvs_slock);
6846 			cmn->nvs_activity &= ~(1 << i);
6847 			mutex_exit(&cmn->nvs_slock);
6848 		}
6849 
6850 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6851 
6852 		/* write out LED values */
6853 
6854 		mutex_enter(&cmn->nvs_slock);
6855 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6856 		cbp->sgpio0_tr |= new_led_state;
6857 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6858 		mutex_exit(&cmn->nvs_slock);
6859 
6860 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6861 			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6862 			    "nv_sgp_write_data failure updating active LED"));
6863 		}
6864 
6865 		/* now rest for the interval */
6866 		mutex_enter(&cmn->nvs_tlock);
6867 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6868 		if (ticks > 0)
6869 			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6870 			    ddi_get_lbolt() + ticks);
6871 		mutex_exit(&cmn->nvs_tlock);
6872 	} while (ticks > 0);
6873 }
6874 
6875 /*
6876  * nv_sgp_drive_connect
6877  * Set the flag used to indicate that the drive is attached to the HBA.
6878  * Used to let the taskq know that it should turn the Activity LED on.
6879  */
6880 static void
6881 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6882 {
6883 	nv_sgp_cmn_t *cmn;
6884 
6885 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6886 		return;
6887 	cmn = nvc->nvc_sgp_cmn;
6888 
6889 	mutex_enter(&cmn->nvs_slock);
6890 	cmn->nvs_connected |= (1 << drive);
6891 	mutex_exit(&cmn->nvs_slock);
6892 }
6893 
6894 /*
6895  * nv_sgp_drive_disconnect
 * Clears the flag used to indicate that the drive is attached to the
 * HBA, marking it as no longer present.  Used to let the taskq know that
 * it should turn the Activity LED off.  The flag that indicates drive
 * activity is also cleared.
6900  */
6901 static void
6902 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6903 {
6904 	nv_sgp_cmn_t *cmn;
6905 
6906 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6907 		return;
6908 	cmn = nvc->nvc_sgp_cmn;
6909 
6910 	mutex_enter(&cmn->nvs_slock);
6911 	cmn->nvs_connected &= ~(1 << drive);
6912 	cmn->nvs_activity &= ~(1 << drive);
6913 	mutex_exit(&cmn->nvs_slock);
6914 }
6915 
6916 /*
6917  * nv_sgp_drive_active
 * Sets the flag used to indicate that the drive has been accessed and the
 * LED should be blinked off, then back on.  The flag is cleared at a fixed
 * time interval by the LED taskq and set when a sata command is started.
6921  */
6922 static void
6923 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6924 {
6925 	nv_sgp_cmn_t *cmn;
6926 
6927 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6928 		return;
6929 	cmn = nvc->nvc_sgp_cmn;
6930 
6931 	DTRACE_PROBE1(sgpio__active, int, drive);
6932 
6933 	mutex_enter(&cmn->nvs_slock);
6934 	cmn->nvs_connected |= (1 << drive);
6935 	cmn->nvs_activity |= (1 << drive);
6936 	mutex_exit(&cmn->nvs_slock);
6937 }
6938 
6939 
6940 /*
6941  * nv_sgp_locate
6942  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6943  * maintained in the SGPIO Control Block.
6944  */
6945 static void
6946 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6947 {
6948 	uint8_t leds;
6949 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6950 	nv_sgp_cmn_t *cmn;
6951 
6952 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6953 		return;
6954 	cmn = nvc->nvc_sgp_cmn;
6955 
6956 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6957 		return;
6958 
6959 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6960 
6961 	mutex_enter(&cmn->nvs_slock);
6962 
6963 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6964 
6965 	leds &= ~TR_LOCATE_MASK;
6966 	leds |= TR_LOCATE_SET(value);
6967 
6968 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6969 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6970 
6971 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6972 
6973 	mutex_exit(&cmn->nvs_slock);
6974 
6975 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6976 		nv_cmn_err(CE_WARN, nvc, NULL,
6977 		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6978 	}
6979 }
6980 
6981 /*
6982  * nv_sgp_error
6983  * Turns the Error/Failure LED off or on for a particular drive.  State is
6984  * maintained in the SGPIO Control Block.
6985  */
6986 static void
6987 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6988 {
6989 	uint8_t leds;
6990 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6991 	nv_sgp_cmn_t *cmn;
6992 
6993 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6994 		return;
6995 	cmn = nvc->nvc_sgp_cmn;
6996 
6997 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6998 		return;
6999 
7000 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7001 
7002 	mutex_enter(&cmn->nvs_slock);
7003 
7004 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7005 
7006 	leds &= ~TR_ERROR_MASK;
7007 	leds |= TR_ERROR_SET(value);
7008 
7009 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7010 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7011 
7012 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7013 
7014 	mutex_exit(&cmn->nvs_slock);
7015 
7016 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7017 		nv_cmn_err(CE_WARN, nvc, NULL,
7018 		    "!nv_sgp_write_data failure updating Fail/Error LED");
7019 	}
7020 }
7021 
7022 static void
7023 nv_sgp_cleanup(nv_ctl_t *nvc)
7024 {
7025 	int drive, i;
7026 	uint8_t drv_leds;
7027 	uint32_t led_state;
7028 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7029 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7030 	extern void psm_unmap_phys(caddr_t, size_t);
7031 
7032 	/*
7033 	 * If the SGPIO Control Block isn't mapped or the shared data
7034 	 * structure isn't present in this instance, there isn't much that
7035 	 * can be cleaned up.
7036 	 */
7037 	if ((cb == NULL) || (cmn == NULL))
7038 		return;
7039 
7040 	/* turn off activity LEDs for this controller */
7041 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7042 
7043 	/* get the existing LED state */
7044 	led_state = cb->sgpio0_tr;
7045 
7046 	/* turn off port 0 */
7047 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7048 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7049 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7050 
7051 	/* turn off port 1 */
7052 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7053 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7054 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7055 
7056 	/* set the new led state, which should turn off this ctrl's LEDs */
7057 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7058 	(void) nv_sgp_write_data(nvc);
7059 
7060 	/* clear the controller's in use bit */
7061 	mutex_enter(&cmn->nvs_slock);
7062 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7063 	mutex_exit(&cmn->nvs_slock);
7064 
7065 	if (cmn->nvs_in_use == 0) {
7066 		/* if all "in use" bits cleared, take everything down */
7067 
7068 		if (cmn->nvs_taskq != NULL) {
7069 			/* allow activity taskq to exit */
7070 			cmn->nvs_taskq_delay = 0;
7071 			cv_broadcast(&cmn->nvs_cv);
7072 
7073 			/* then destroy it */
7074 			ddi_taskq_destroy(cmn->nvs_taskq);
7075 		}
7076 
7077 		/* turn off all of the LEDs */
7078 		cb->sgpio0_tr = 0;
7079 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7080 		(void) nv_sgp_write_data(nvc);
7081 
7082 		cb->sgpio_sr = NULL;
7083 
7084 		/* zero out the CBP to cmn mapping */
7085 		for (i = 0; i < NV_MAX_CBPS; i++) {
7086 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7087 				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7088 				break;
7089 			}
7090 
7091 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7092 				break;
7093 		}
7094 
7095 		/* free resources */
7096 		cv_destroy(&cmn->nvs_cv);
7097 		mutex_destroy(&cmn->nvs_tlock);
7098 		mutex_destroy(&cmn->nvs_slock);
7099 
7100 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7101 	}
7102 
7103 	nvc->nvc_sgp_cmn = NULL;
7104 
7105 	/* unmap the SGPIO Control Block */
7106 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7107 }
7108 #endif	/* SGPIO_SUPPORT */
7109