1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for CK804/MCP04 (ck804) and
30 * MCP55/MCP51/MCP61 (mcp5x) based chipsets.
31 *
32 * NCQ
33 * ---
34 *
35 * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
36 * and is likely to be revisited in the future.
37 *
38 *
39 * Power Management
40 * ----------------
41 *
42 * Normally power management would be responsible for ensuring the device
43 * is quiescent and then changing power states to the device, such as
44 * powering down parts or all of the device.  mcp5x/ck804 is unique in
45 * that it is only available as part of a larger southbridge chipset, so
46 * removing power to the device isn't possible.  Switches to control
47 * power management states D0/D3 in the PCI configuration space appear to
48 * be supported but changes to these states are apparently are ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY,
50 * but in order to deliver the first rev of the driver sooner than later,
51 * that will be deferred until some future phase.
52 *
53 * Since the driver currently will not directly change any power state to
54 * the device, no power() entry point will be required.  However, it is
55 * possible that in ACPI power state S3, aka suspend to RAM, that power
56 * can be removed to the device, and the driver cannot rely on BIOS to
57 * have reset any state.  For the time being, there is no known
58 * non-default configurations that need to be programmed.  This judgement
59 * is based on the port of the legacy ata driver not having any such
60 * functionality and based on conversations with the PM team.  If such a
61 * restoration is later deemed necessary it can be incorporated into the
62 * DDI_RESUME processing.
63 *
64 */
65
66#include <sys/scsi/scsi.h>
67#include <sys/pci.h>
68#include <sys/byteorder.h>
69#include <sys/sunddi.h>
70#include <sys/sata/sata_hba.h>
71#ifdef SGPIO_SUPPORT
72#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73#include <sys/devctl.h>
74#include <sys/sdt.h>
75#endif
76#include <sys/sata/adapters/nv_sata/nv_sata.h>
77#include <sys/disp.h>
78#include <sys/note.h>
79#include <sys/promif.h>
80
81
82/*
83 * Function prototypes for driver entry points
84 */
85static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87static int nv_quiesce(dev_info_t *dip);
88static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89    void *arg, void **result);
90
91/*
92 * Function prototypes for entry points from sata service module
93 * These functions are distinguished from other local functions
94 * by the prefix "nv_sata_"
95 */
96static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101
102/*
103 * Local function prototypes
104 */
105static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108#ifdef NV_MSI_SUPPORTED
109static int nv_add_msi_intrs(nv_ctl_t *nvc);
110#endif
111static void nv_rem_intrs(nv_ctl_t *nvc);
112static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113static int nv_start_nodata(nv_port_t *nvp, int slot);
114static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115static int nv_start_pio_in(nv_port_t *nvp, int slot);
116static int nv_start_pio_out(nv_port_t *nvp, int slot);
117static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121static int nv_start_dma(nv_port_t *nvp, int slot);
122static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123static void nv_uninit_ctl(nv_ctl_t *nvc);
124static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126static void nv_uninit_port(nv_port_t *nvp);
127static void nv_init_port(nv_port_t *nvp);
128static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
129static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
130#ifdef NCQ
131static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
132#endif
133static void nv_start_dma_engine(nv_port_t *nvp, int slot);
134static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
135    int state);
136static void nv_common_reg_init(nv_ctl_t *nvc);
137static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
138static void nv_reset(nv_port_t *nvp, char *reason);
139static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
140static void nv_timeout(void *);
141static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
142static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
143static void nv_read_signature(nv_port_t *nvp);
144static void mcp5x_set_intr(nv_port_t *nvp, int flag);
145static void ck804_set_intr(nv_port_t *nvp, int flag);
146static void nv_resume(nv_port_t *nvp);
147static void nv_suspend(nv_port_t *nvp);
148static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
149static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
150    boolean_t reset);
151static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
152    sata_pkt_t *spkt);
153static void nv_link_event(nv_port_t *nvp, int flags);
154static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
155static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
156    uchar_t failure_onbits2, uchar_t failure_offbits2,
157    uchar_t failure_onbits3, uchar_t failure_offbits3,
158    uint_t timeout_usec, int type_wait);
159static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
160    uint_t timeout_usec, int type_wait);
161static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
162static void nv_setup_timeout(nv_port_t *nvp, clock_t microseconds);
163static clock_t nv_monitor_reset(nv_port_t *nvp);
164static int nv_bm_status_clear(nv_port_t *nvp);
165static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
166
167#ifdef SGPIO_SUPPORT
168static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
169static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
170static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
171    cred_t *credp, int *rvalp);
172
173static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
174static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
175    uint32_t *cbpp);
176static int nv_sgp_init(nv_ctl_t *nvc);
177static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
178static int nv_sgp_csr_read(nv_ctl_t *nvc);
179static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
180static int nv_sgp_write_data(nv_ctl_t *nvc);
181static void nv_sgp_activity_led_ctl(void *arg);
182static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
183static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
184static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
185static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
186static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
187static void nv_sgp_cleanup(nv_ctl_t *nvc);
188#endif
189
190
191/*
192 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
193 * Verify if needed if ported to other ISA.
194 */
195static ddi_dma_attr_t buffer_dma_attr = {
196	DMA_ATTR_V0,		/* dma_attr_version */
197	0,			/* dma_attr_addr_lo: lowest bus address */
198	0xffffffffull,		/* dma_attr_addr_hi: */
199	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
200	4,			/* dma_attr_align */
201	1,			/* dma_attr_burstsizes. */
202	1,			/* dma_attr_minxfer */
203	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
204	0xffffffffull,		/* dma_attr_seg */
205	NV_DMA_NSEGS,		/* dma_attr_sgllen */
206	512,			/* dma_attr_granular */
207	0,			/* dma_attr_flags */
208};
209static ddi_dma_attr_t buffer_dma_40bit_attr = {
210	DMA_ATTR_V0,		/* dma_attr_version */
211	0,			/* dma_attr_addr_lo: lowest bus address */
212	0xffffffffffull,	/* dma_attr_addr_hi: */
213	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
214	4,			/* dma_attr_align */
215	1,			/* dma_attr_burstsizes. */
216	1,			/* dma_attr_minxfer */
217	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
218	0xffffffffull,		/* dma_attr_seg */
219	NV_DMA_NSEGS,		/* dma_attr_sgllen */
220	512,			/* dma_attr_granular */
221	0,			/* dma_attr_flags */
222};
223
224
225/*
226 * DMA attributes for PRD tables
227 */
228ddi_dma_attr_t nv_prd_dma_attr = {
229	DMA_ATTR_V0,		/* dma_attr_version */
230	0,			/* dma_attr_addr_lo */
231	0xffffffffull,		/* dma_attr_addr_hi */
232	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
233	4,			/* dma_attr_align */
234	1,			/* dma_attr_burstsizes */
235	1,			/* dma_attr_minxfer */
236	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
237	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
238	1,			/* dma_attr_sgllen */
239	1,			/* dma_attr_granular */
240	0			/* dma_attr_flags */
241};
242
243/*
244 * Device access attributes
245 */
246static ddi_device_acc_attr_t accattr = {
247    DDI_DEVICE_ATTR_V0,
248    DDI_STRUCTURE_LE_ACC,
249    DDI_STRICTORDER_ACC
250};
251
252
253#ifdef SGPIO_SUPPORT
254static struct cb_ops nv_cb_ops = {
255	nv_open,		/* open */
256	nv_close,		/* close */
257	nodev,			/* strategy (block) */
258	nodev,			/* print (block) */
259	nodev,			/* dump (block) */
260	nodev,			/* read */
261	nodev,			/* write */
262	nv_ioctl,		/* ioctl */
263	nodev,			/* devmap */
264	nodev,			/* mmap */
265	nodev,			/* segmap */
266	nochpoll,		/* chpoll */
267	ddi_prop_op,		/* prop_op */
268	NULL,			/* streams */
269	D_NEW | D_MP |
270	D_64BIT | D_HOTPLUG,	/* flags */
271	CB_REV			/* rev */
272};
273#endif  /* SGPIO_SUPPORT */
274
275
276static struct dev_ops nv_dev_ops = {
277	DEVO_REV,		/* devo_rev */
278	0,			/* refcnt  */
279	nv_getinfo,		/* info */
280	nulldev,		/* identify */
281	nulldev,		/* probe */
282	nv_attach,		/* attach */
283	nv_detach,		/* detach */
284	nodev,			/* no reset */
285#ifdef SGPIO_SUPPORT
286	&nv_cb_ops,		/* driver operations */
287#else
288	(struct cb_ops *)0,	/* driver operations */
289#endif
290	NULL,			/* bus operations */
291	NULL,			/* power */
292	nv_quiesce		/* quiesce */
293};
294
295
296/*
297 * Request Sense CDB for ATAPI
298 */
299static const uint8_t nv_rqsense_cdb[16] = {
300	SCMD_REQUEST_SENSE,
301	0,
302	0,
303	0,
304	SATA_ATAPI_MIN_RQSENSE_LEN,
305	0,
306	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
307};
308
309
310static sata_tran_hotplug_ops_t nv_hotplug_ops;
311
312extern struct mod_ops mod_driverops;
313
314static  struct modldrv modldrv = {
315	&mod_driverops,	/* driverops */
316	"NVIDIA CK804/MCP04/MCP51/MCP55/MCP61 HBA",
317	&nv_dev_ops,	/* driver ops */
318};
319
320static  struct modlinkage modlinkage = {
321	MODREV_1,
322	&modldrv,
323	NULL
324};
325
326/*
327 * Maximum number of consecutive interrupts processed in the loop in the
328 * single invocation of the port interrupt routine.
329 */
330int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
331
332/*
333 * wait between checks of reg status
334 */
335int nv_usec_delay = NV_WAIT_REG_CHECK;
336
337/*
338 * The following used for nv_vcmn_err() and nv_log()
339 */
340
341/*
342 * temp buffer to save from wasting limited stack space
343 */
344static char nv_log_buf[NV_LOGBUF_LEN];
345
346/*
347 * protects nv_log_buf
348 */
349static kmutex_t nv_log_mutex;
350
351/*
352 * these on-by-default flags were chosen so that the driver
353 * logs as much non-usual run-time information as possible
354 * without overflowing the ring with useless information or
355 * causing any significant performance penalty.
356 */
357int nv_debug_flags =
358    NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
359
360/*
361 * normally debug information is not logged to the console
362 * but this allows it to be enabled.
363 */
364int nv_log_to_console = B_FALSE;
365
366/*
367 * normally debug information is not logged to cmn_err but
368 * in some cases it may be desired.
369 */
370int nv_log_to_cmn_err = B_FALSE;
371
372/*
373 * using prom print avoids using cmn_err/syslog and goes right
374 * to the console which may be desirable in some situations, but
375 * it may be synchronous, which would change timings and
376 * impact performance.  Use with caution.
377 */
378int nv_prom_print = B_FALSE;
379
380/*
381 * Opaque state pointer to be initialized by ddi_soft_state_init()
382 */
383static void *nv_statep	= NULL;
384
385/*
386 * Map from CBP to shared space
387 *
388 * When a MCP55/IO55 parts supports SGPIO, there is a single CBP (SGPIO
389 * Control Block Pointer as well as the corresponding Control Block) that
390 * is shared across all driver instances associated with that part.  The
391 * Control Block is used to update and query the LED state for the devices
392 * on the controllers associated with those instances.  There is also some
393 * driver state (called the 'common' area here) associated with each SGPIO
394 * Control Block.  The nv_sgp_cpb2cmn is used to map a given CBP to its
395 * control area.
396 *
397 * The driver can also use this mapping array to determine whether the
398 * common area for a given CBP has been initialized, and, if it isn't
399 * initialized, initialize it.
400 *
401 * When a driver instance with a CBP value that is already in the array is
402 * initialized, it will use the pointer to the previously initialized common
403 * area associated with that SGPIO CBP value, rather than initialize it
404 * itself.
405 *
406 * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
407 */
408#ifdef SGPIO_SUPPORT
409static kmutex_t nv_sgp_c2c_mutex;
410static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
411#endif
412
413/*
414 * control whether 40bit DMA is used or not
415 */
416int nv_sata_40bit_dma = B_TRUE;
417
418static sata_tran_hotplug_ops_t nv_hotplug_ops = {
419	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
420	nv_sata_activate,	/* activate port. cfgadm -c connect */
421	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
422};
423
424
425/*
426 *  nv module initialization
427 */
428int
429_init(void)
430{
431	int	error;
432#ifdef SGPIO_SUPPORT
433	int	i;
434#endif
435
436	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
437
438	if (error != 0) {
439
440		return (error);
441	}
442
443	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
444#ifdef SGPIO_SUPPORT
445	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
446
447	for (i = 0; i < NV_MAX_CBPS; i++) {
448		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
449		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
450	}
451#endif
452
453	if ((error = sata_hba_init(&modlinkage)) != 0) {
454		ddi_soft_state_fini(&nv_statep);
455		mutex_destroy(&nv_log_mutex);
456
457		return (error);
458	}
459
460	error = mod_install(&modlinkage);
461	if (error != 0) {
462		sata_hba_fini(&modlinkage);
463		ddi_soft_state_fini(&nv_statep);
464		mutex_destroy(&nv_log_mutex);
465
466		return (error);
467	}
468
469	return (error);
470}
471
472
473/*
474 * nv module uninitialize
475 */
476int
477_fini(void)
478{
479	int	error;
480
481	error = mod_remove(&modlinkage);
482
483	if (error != 0) {
484		return (error);
485	}
486
487	/*
488	 * remove the resources allocated in _init()
489	 */
490	mutex_destroy(&nv_log_mutex);
491#ifdef SGPIO_SUPPORT
492	mutex_destroy(&nv_sgp_c2c_mutex);
493#endif
494	sata_hba_fini(&modlinkage);
495	ddi_soft_state_fini(&nv_statep);
496
497	return (error);
498}
499
500
501/*
502 * nv _info entry point
503 */
504int
505_info(struct modinfo *modinfop)
506{
507	return (mod_info(&modlinkage, modinfop));
508}
509
510
511/*
512 * these wrappers for ddi_{get,put}8 are for observability
513 * with dtrace
514 */
515#ifdef DEBUG
516
517static void
518nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
519{
520	ddi_put8(handle, dev_addr, value);
521}
522
523static void
524nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
525{
526	ddi_put32(handle, dev_addr, value);
527}
528
529static uint32_t
530nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
531{
532	return (ddi_get32(handle, dev_addr));
533}
534
535static void
536nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
537{
538	ddi_put16(handle, dev_addr, value);
539}
540
541static uint16_t
542nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
543{
544	return (ddi_get16(handle, dev_addr));
545}
546
547static uint8_t
548nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
549{
550	return (ddi_get8(handle, dev_addr));
551}
552
553#else
554
555#define	nv_put8 ddi_put8
556#define	nv_put32 ddi_put32
557#define	nv_get32 ddi_get32
558#define	nv_put16 ddi_put16
559#define	nv_get16 ddi_get16
560#define	nv_get8 ddi_get8
561
562#endif
563
564
565/*
566 * Driver attach
567 */
568static int
569nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
570{
571	int status, attach_state, intr_types, bar, i, j, command;
572	int inst = ddi_get_instance(dip);
573	ddi_acc_handle_t pci_conf_handle;
574	nv_ctl_t *nvc;
575	uint8_t subclass;
576	uint32_t reg32;
577#ifdef SGPIO_SUPPORT
578	pci_regspec_t *regs;
579	int rlen;
580#endif
581
582	switch (cmd) {
583
584	case DDI_ATTACH:
585
586		attach_state = ATTACH_PROGRESS_NONE;
587
588		status = ddi_soft_state_zalloc(nv_statep, inst);
589
590		if (status != DDI_SUCCESS) {
591			break;
592		}
593
594		nvc = ddi_get_soft_state(nv_statep, inst);
595
596		nvc->nvc_dip = dip;
597
598		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
599
600		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
601
602		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
603			nvc->nvc_devid = pci_config_get16(pci_conf_handle,
604			    PCI_CONF_DEVID);
605			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
606			    PCI_CONF_REVID);
607			NVLOG(NVDBG_INIT, nvc, NULL,
608			    "inst %d: devid is %x silicon revid is %x"
609			    " nv_debug_flags=%x", inst, nvc->nvc_devid,
610			    nvc->nvc_revid, nv_debug_flags);
611		} else {
612			break;
613		}
614
615		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
616
617		/*
618		 * Set the PCI command register: enable IO/MEM/Master.
619		 */
620		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
621		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
622		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
623
624		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
625
626		if (subclass & PCI_MASS_RAID) {
627			cmn_err(CE_WARN,
628			    "attach failed: RAID mode not supported");
629
630			break;
631		}
632
633		/*
634		 * the 6 bars of the controller are:
635		 * 0: port 0 task file
636		 * 1: port 0 status
637		 * 2: port 1 task file
638		 * 3: port 1 status
639		 * 4: bus master for both ports
640		 * 5: extended registers for SATA features
641		 */
642		for (bar = 0; bar < 6; bar++) {
643			status = ddi_regs_map_setup(dip, bar + 1,
644			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
645			    &nvc->nvc_bar_hdl[bar]);
646
647			if (status != DDI_SUCCESS) {
648				NVLOG(NVDBG_INIT, nvc, NULL,
649				    "ddi_regs_map_setup failure for bar"
650				    " %d status = %d", bar, status);
651				break;
652			}
653		}
654
655		attach_state |= ATTACH_PROGRESS_BARS;
656
657		/*
658		 * initialize controller structures
659		 */
660		status = nv_init_ctl(nvc, pci_conf_handle);
661
662		if (status == NV_FAILURE) {
663			NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
664			    NULL);
665
666			break;
667		}
668
669		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
670
671		/*
672		 * initialize mutexes
673		 */
674		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
675		    DDI_INTR_PRI(nvc->nvc_intr_pri));
676
677		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
678
679		/*
680		 * get supported interrupt types
681		 */
682		if (ddi_intr_get_supported_types(dip, &intr_types) !=
683		    DDI_SUCCESS) {
684			nv_cmn_err(CE_WARN, nvc, NULL,
685			    "ddi_intr_get_supported_types failed");
686
687			break;
688		}
689
690		NVLOG(NVDBG_INIT, nvc, NULL,
691		    "ddi_intr_get_supported_types() returned: 0x%x",
692		    intr_types);
693
694#ifdef NV_MSI_SUPPORTED
695		if (intr_types & DDI_INTR_TYPE_MSI) {
696			NVLOG(NVDBG_INIT, nvc, NULL,
697			    "using MSI interrupt type", NULL);
698
699			/*
700			 * Try MSI first, but fall back to legacy if MSI
701			 * attach fails
702			 */
703			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
704				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
705				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
706				NVLOG(NVDBG_INIT, nvc, NULL,
707				    "MSI interrupt setup done", NULL);
708			} else {
709				nv_cmn_err(CE_CONT, nvc, NULL,
710				    "MSI registration failed "
711				    "will try Legacy interrupts");
712			}
713		}
714#endif
715
716		/*
717		 * Either the MSI interrupt setup has failed or only
718		 * the fixed interrupts are available on the system.
719		 */
720		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
721		    (intr_types & DDI_INTR_TYPE_FIXED)) {
722
723			NVLOG(NVDBG_INIT, nvc, NULL,
724			    "using Legacy interrupt type", NULL);
725
726			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
727				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
728				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
729				NVLOG(NVDBG_INIT, nvc, NULL,
730				    "Legacy interrupt setup done", NULL);
731			} else {
732				nv_cmn_err(CE_WARN, nvc, NULL,
733				    "legacy interrupt setup failed");
734				NVLOG(NVDBG_INIT, nvc, NULL,
735				    "legacy interrupt setup failed", NULL);
736				break;
737			}
738		}
739
740		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
741			NVLOG(NVDBG_INIT, nvc, NULL,
742			    "no interrupts registered", NULL);
743			break;
744		}
745
746#ifdef SGPIO_SUPPORT
747		/*
748		 * save off the controller number
749		 */
750		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
751		    "reg", (caddr_t)&regs, &rlen);
752		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
753		kmem_free(regs, rlen);
754
755		/*
756		 * initialize SGPIO
757		 */
758		nv_sgp_led_init(nvc, pci_conf_handle);
759#endif	/* SGPIO_SUPPORT */
760
761		/*
762		 * Do initial reset so that signature can be gathered
763		 */
764		for (j = 0; j < NV_NUM_PORTS; j++) {
765			ddi_acc_handle_t bar5_hdl;
766			uint32_t sstatus;
767			nv_port_t *nvp;
768
769			nvp = &(nvc->nvc_port[j]);
770			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
771			sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
772
773			if (SSTATUS_GET_DET(sstatus) ==
774			    SSTATUS_DET_DEVPRE_PHYCOM) {
775
776				nvp->nvp_state |= NV_ATTACH;
777				nvp->nvp_type = SATA_DTYPE_UNKNOWN;
778				mutex_enter(&nvp->nvp_mutex);
779				nv_reset(nvp, "attach");
780
781				while (nvp->nvp_state & NV_RESET) {
782					cv_wait(&nvp->nvp_reset_cv,
783					    &nvp->nvp_mutex);
784				}
785
786				mutex_exit(&nvp->nvp_mutex);
787			}
788		}
789
790		/*
791		 * attach to sata module
792		 */
793		if (sata_hba_attach(nvc->nvc_dip,
794		    &nvc->nvc_sata_hba_tran,
795		    DDI_ATTACH) != DDI_SUCCESS) {
796			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
797
798			break;
799		}
800
801		pci_config_teardown(&pci_conf_handle);
802
803		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
804
805		return (DDI_SUCCESS);
806
807	case DDI_RESUME:
808
809		nvc = ddi_get_soft_state(nv_statep, inst);
810
811		NVLOG(NVDBG_INIT, nvc, NULL,
812		    "nv_attach(): DDI_RESUME inst %d", inst);
813
814		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
815			return (DDI_FAILURE);
816		}
817
818		/*
819		 * Set the PCI command register: enable IO/MEM/Master.
820		 */
821		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
822		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
823		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
824
825		/*
826		 * Need to set bit 2 to 1 at config offset 0x50
827		 * to enable access to the bar5 registers.
828		 */
829		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
830
831		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
832			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
833			    reg32 | NV_BAR5_SPACE_EN);
834		}
835
836		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
837
838		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
839			nv_resume(&(nvc->nvc_port[i]));
840		}
841
842		pci_config_teardown(&pci_conf_handle);
843
844		return (DDI_SUCCESS);
845
846	default:
847		return (DDI_FAILURE);
848	}
849
850
851	/*
852	 * DDI_ATTACH failure path starts here
853	 */
854
855	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
856		nv_rem_intrs(nvc);
857	}
858
859	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
860		/*
861		 * Remove timers
862		 */
863		int port = 0;
864		nv_port_t *nvp;
865
866		for (; port < NV_MAX_PORTS(nvc); port++) {
867			nvp = &(nvc->nvc_port[port]);
868			if (nvp->nvp_timeout_id != 0) {
869				(void) untimeout(nvp->nvp_timeout_id);
870			}
871		}
872	}
873
874	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
875		mutex_destroy(&nvc->nvc_mutex);
876	}
877
878	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
879		nv_uninit_ctl(nvc);
880	}
881
882	if (attach_state & ATTACH_PROGRESS_BARS) {
883		while (--bar >= 0) {
884			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
885		}
886	}
887
888	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
889		ddi_soft_state_free(nv_statep, inst);
890	}
891
892	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
893		pci_config_teardown(&pci_conf_handle);
894	}
895
896	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
897
898	return (DDI_FAILURE);
899}
900
901
902static int
903nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
904{
905	int i, port, inst = ddi_get_instance(dip);
906	nv_ctl_t *nvc;
907	nv_port_t *nvp;
908
909	nvc = ddi_get_soft_state(nv_statep, inst);
910
911	switch (cmd) {
912
913	case DDI_DETACH:
914
915		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);
916
917		/*
918		 * Remove interrupts
919		 */
920		nv_rem_intrs(nvc);
921
922		/*
923		 * Remove timers
924		 */
925		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
926			nvp = &(nvc->nvc_port[port]);
927			if (nvp->nvp_timeout_id != 0) {
928				(void) untimeout(nvp->nvp_timeout_id);
929			}
930		}
931
932		/*
933		 * Remove maps
934		 */
935		for (i = 0; i < 6; i++) {
936			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
937		}
938
939		/*
940		 * Destroy mutexes
941		 */
942		mutex_destroy(&nvc->nvc_mutex);
943
944		/*
945		 * Uninitialize the controller structures
946		 */
947		nv_uninit_ctl(nvc);
948
949#ifdef SGPIO_SUPPORT
950		/*
951		 * release SGPIO resources
952		 */
953		nv_sgp_cleanup(nvc);
954#endif
955
956		/*
957		 * unregister from the sata module
958		 */
959		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
960
961		/*
962		 * Free soft state
963		 */
964		ddi_soft_state_free(nv_statep, inst);
965
966		return (DDI_SUCCESS);
967
968	case DDI_SUSPEND:
969
970		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);
971
972		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
973			nv_suspend(&(nvc->nvc_port[i]));
974		}
975
976		nvc->nvc_state |= NV_CTRL_SUSPEND;
977
978		return (DDI_SUCCESS);
979
980	default:
981		return (DDI_FAILURE);
982	}
983}
984
985
986/*ARGSUSED*/
987static int
988nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
989{
990	nv_ctl_t *nvc;
991	int instance;
992	dev_t dev;
993
994	dev = (dev_t)arg;
995	instance = getminor(dev);
996
997	switch (infocmd) {
998	case DDI_INFO_DEVT2DEVINFO:
999		nvc = ddi_get_soft_state(nv_statep,  instance);
1000		if (nvc != NULL) {
1001			*result = nvc->nvc_dip;
1002			return (DDI_SUCCESS);
1003		} else {
1004			*result = NULL;
1005			return (DDI_FAILURE);
1006		}
1007	case DDI_INFO_DEVT2INSTANCE:
1008		*(int *)result = instance;
1009		break;
1010	default:
1011		break;
1012	}
1013	return (DDI_SUCCESS);
1014}
1015
1016
1017#ifdef SGPIO_SUPPORT
1018/* ARGSUSED */
1019static int
1020nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1021{
1022	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1023
1024	if (nvc == NULL) {
1025		return (ENXIO);
1026	}
1027
1028	return (0);
1029}
1030
1031
1032/* ARGSUSED */
1033static int
1034nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1035{
1036	return (0);
1037}
1038
1039
1040/* ARGSUSED */
1041static int
1042nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1043{
1044	nv_ctl_t *nvc;
1045	int inst;
1046	int status;
1047	int ctlr, port;
1048	int drive;
1049	uint8_t curr_led;
1050	struct dc_led_ctl led;
1051
1052	inst = getminor(dev);
1053	if (inst == -1) {
1054		return (EBADF);
1055	}
1056
1057	nvc = ddi_get_soft_state(nv_statep, inst);
1058	if (nvc == NULL) {
1059		return (EBADF);
1060	}
1061
1062	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1063		return (EIO);
1064	}
1065
1066	switch (cmd) {
1067	case DEVCTL_SET_LED:
1068		status = ddi_copyin((void *)arg, &led,
1069		    sizeof (struct dc_led_ctl), mode);
1070		if (status != 0)
1071			return (EFAULT);
1072
1073		/*
1074		 * Since only the first two controller currently support
1075		 * SGPIO (as per NVIDIA docs), this code will as well.
1076		 * Note that this validate the port value within led_state
1077		 * as well.
1078		 */
1079
1080		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1081		if ((ctlr != 0) && (ctlr != 1))
1082			return (ENXIO);
1083
1084		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1085		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1086			return (EINVAL);
1087		}
1088
1089		drive = led.led_number;
1090
1091		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1092		    (led.led_state == DCL_STATE_OFF)) {
1093
1094			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1095				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1096			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1097				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1098			} else {
1099				return (ENXIO);
1100			}
1101
1102			port = SGP_DRV_TO_PORT(led.led_number);
1103			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1104		}
1105
1106		if (led.led_ctl_active == DCL_CNTRL_ON) {
1107			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1108				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1109			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1110				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1111			} else {
1112				return (ENXIO);
1113			}
1114
1115			port = SGP_DRV_TO_PORT(led.led_number);
1116			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1117		}
1118
1119		break;
1120
1121	case DEVCTL_GET_LED:
1122		status = ddi_copyin((void *)arg, &led,
1123		    sizeof (struct dc_led_ctl), mode);
1124		if (status != 0)
1125			return (EFAULT);
1126
1127		/*
1128		 * Since only the first two controller currently support
1129		 * SGPIO (as per NVIDIA docs), this code will as well.
1130		 * Note that this validate the port value within led_state
1131		 * as well.
1132		 */
1133
1134		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1135		if ((ctlr != 0) && (ctlr != 1))
1136			return (ENXIO);
1137
1138		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1139		    led.led_number);
1140
1141		port = SGP_DRV_TO_PORT(led.led_number);
1142		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1143			led.led_ctl_active = DCL_CNTRL_ON;
1144
1145			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1146				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1147					led.led_state = DCL_STATE_OFF;
1148				else
1149					led.led_state = DCL_STATE_ON;
1150			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1151				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1152					led.led_state = DCL_STATE_OFF;
1153				else
1154					led.led_state = DCL_STATE_ON;
1155			} else {
1156				return (ENXIO);
1157			}
1158		} else {
1159			led.led_ctl_active = DCL_CNTRL_OFF;
1160			/*
1161			 * Not really off, but never set and no constant for
1162			 * tri-state
1163			 */
1164			led.led_state = DCL_STATE_OFF;
1165		}
1166
1167		status = ddi_copyout(&led, (void *)arg,
1168		    sizeof (struct dc_led_ctl), mode);
1169		if (status != 0)
1170			return (EFAULT);
1171
1172		break;
1173
1174	case DEVCTL_NUM_LEDS:
1175		led.led_number = SGPIO_DRV_CNT_VALUE;
1176		led.led_ctl_active = 1;
1177		led.led_type = 3;
1178
1179		/*
1180		 * According to documentation, NVIDIA SGPIO is supposed to
1181		 * support blinking, but it does not seem to work in practice.
1182		 */
1183		led.led_state = DCL_STATE_ON;
1184
1185		status = ddi_copyout(&led, (void *)arg,
1186		    sizeof (struct dc_led_ctl), mode);
1187		if (status != 0)
1188			return (EFAULT);
1189
1190		break;
1191
1192	default:
1193		return (EINVAL);
1194	}
1195
1196	return (0);
1197}
1198#endif	/* SGPIO_SUPPORT */
1199
1200
1201/*
1202 * Called by sata module to probe a port.  Port and device state
1203 * are not changed here... only reported back to the sata module.
1204 *
1205 */
1206static int
1207nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1208{
1209	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1210	uint8_t cport = sd->satadev_addr.cport;
1211	uint8_t pmport = sd->satadev_addr.pmport;
1212	uint8_t qual = sd->satadev_addr.qual;
1213	uint8_t det;
1214
1215	nv_port_t *nvp;
1216
1217	if (cport >= NV_MAX_PORTS(nvc)) {
1218		sd->satadev_type = SATA_DTYPE_NONE;
1219		sd->satadev_state = SATA_STATE_UNKNOWN;
1220
1221		return (SATA_FAILURE);
1222	}
1223
1224	ASSERT(nvc->nvc_port != NULL);
1225	nvp = &(nvc->nvc_port[cport]);
1226	ASSERT(nvp != NULL);
1227
1228	NVLOG(NVDBG_ENTRY, nvc, nvp,
1229	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1230	    "qual: 0x%x", cport, pmport, qual);
1231
1232	mutex_enter(&nvp->nvp_mutex);
1233
1234	/*
1235	 * This check seems to be done in the SATA module.
1236	 * It may not be required here
1237	 */
1238	if (nvp->nvp_state & NV_DEACTIVATED) {
1239		nv_cmn_err(CE_WARN, nvc, nvp,
1240		    "port inactive.  Use cfgadm to activate");
1241		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1242		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1243		mutex_exit(&nvp->nvp_mutex);
1244
1245		return (SATA_SUCCESS);
1246	}
1247
1248	if (nvp->nvp_state & NV_FAILED) {
1249		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
1250		    "probe: port failed", NULL);
1251		sd->satadev_type = nvp->nvp_type;
1252		sd->satadev_state = SATA_PSTATE_FAILED;
1253		mutex_exit(&nvp->nvp_mutex);
1254
1255		return (SATA_SUCCESS);
1256	}
1257
1258	if (qual == SATA_ADDR_PMPORT) {
1259		sd->satadev_type = SATA_DTYPE_NONE;
1260		sd->satadev_state = SATA_STATE_UNKNOWN;
1261		mutex_exit(&nvp->nvp_mutex);
1262		nv_cmn_err(CE_WARN, nvc, nvp,
1263		    "controller does not support port multiplier");
1264
1265		return (SATA_SUCCESS);
1266	}
1267
1268	sd->satadev_state = SATA_PSTATE_PWRON;
1269
1270	nv_copy_registers(nvp, sd, NULL);
1271
1272	if (nvp->nvp_state & (NV_RESET|NV_LINK_EVENT)) {
1273		/*
1274		 * during a reset or link event, fake the status
1275		 * as it may be changing as a result of the reset
1276		 * or link event.
1277		 */
1278		DTRACE_PROBE(state_reset_link_event_faking_status_p);
1279		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
1280
1281		SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1282		    SSTATUS_IPM_ACTIVE);
1283		SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1284		    SSTATUS_DET_DEVPRE_PHYCOM);
1285		sd->satadev_type = nvp->nvp_type;
1286		mutex_exit(&nvp->nvp_mutex);
1287
1288		return (SATA_SUCCESS);
1289	}
1290
1291	det = SSTATUS_GET_DET(sd->satadev_scr.sstatus);
1292
1293	/*
1294	 * determine link status
1295	 */
1296	if (det != SSTATUS_DET_DEVPRE_PHYCOM) {
1297		switch (det) {
1298
1299		case SSTATUS_DET_NODEV:
1300		case SSTATUS_DET_PHYOFFLINE:
1301			sd->satadev_type = SATA_DTYPE_NONE;
1302			break;
1303
1304		default:
1305			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1306			break;
1307		}
1308
1309		mutex_exit(&nvp->nvp_mutex);
1310
1311		return (SATA_SUCCESS);
1312	}
1313
1314	/*
1315	 * Just report the current port state
1316	 */
1317	sd->satadev_type = nvp->nvp_type;
1318	DTRACE_PROBE1(nvp_type_h, int, nvp->nvp_type);
1319
1320	mutex_exit(&nvp->nvp_mutex);
1321
1322	return (SATA_SUCCESS);
1323}
1324
1325
1326/*
1327 * Called by sata module to start a new command.
1328 */
1329static int
1330nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1331{
1332	int cport = spkt->satapkt_device.satadev_addr.cport;
1333	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1334	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1335	int ret;
1336
1337	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1338	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);
1339
1340	mutex_enter(&nvp->nvp_mutex);
1341
1342	if (nvp->nvp_state & NV_DEACTIVATED) {
1343
1344		NVLOG(NVDBG_ERRS, nvc, nvp,
1345		    "nv_sata_start: NV_DEACTIVATED", NULL);
1346		DTRACE_PROBE(nvp_state_inactive_p);
1347
1348		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1349		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1350		mutex_exit(&nvp->nvp_mutex);
1351
1352		return (SATA_TRAN_PORT_ERROR);
1353	}
1354
1355	if (nvp->nvp_state & NV_FAILED) {
1356
1357		NVLOG(NVDBG_ERRS, nvc, nvp,
1358		    "nv_sata_start: NV_FAILED state", NULL);
1359		DTRACE_PROBE(nvp_state_failed_p);
1360
1361		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1362		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1363		mutex_exit(&nvp->nvp_mutex);
1364
1365		return (SATA_TRAN_PORT_ERROR);
1366	}
1367
1368	if (nvp->nvp_state & NV_RESET) {
1369
1370		NVLOG(NVDBG_ERRS, nvc, nvp,
1371		    "still waiting for reset completion", NULL);
1372		DTRACE_PROBE(nvp_state_reset_p);
1373
1374		spkt->satapkt_reason = SATA_PKT_BUSY;
1375
1376		/*
1377		 * If in panic, timeouts do not occur, so invoke
1378		 * reset handling directly so that the signature
1379		 * can be acquired to complete the reset handling.
1380		 */
1381		if (ddi_in_panic()) {
1382			NVLOG(NVDBG_ERRS, nvc, nvp,
1383			    "nv_sata_start: calling nv_monitor_reset "
1384			    "synchronously", NULL);
1385
1386			(void) nv_monitor_reset(nvp);
1387		}
1388
1389		mutex_exit(&nvp->nvp_mutex);
1390
1391		return (SATA_TRAN_BUSY);
1392	}
1393
1394	if (nvp->nvp_state & NV_LINK_EVENT) {
1395
1396		NVLOG(NVDBG_ERRS, nvc, nvp,
1397		    "nv_sata_start(): link event ret bsy", NULL);
1398		DTRACE_PROBE(nvp_state_link_event_p);
1399
1400		spkt->satapkt_reason = SATA_PKT_BUSY;
1401
1402		if (ddi_in_panic()) {
1403			NVLOG(NVDBG_ERRS, nvc, nvp,
1404			    "nv_sata_start: calling nv_timeout "
1405			    "synchronously", NULL);
1406
1407			nv_timeout(nvp);
1408		}
1409
1410		mutex_exit(&nvp->nvp_mutex);
1411
1412		return (SATA_TRAN_BUSY);
1413	}
1414
1415
1416	if ((nvp->nvp_type == SATA_DTYPE_NONE) ||
1417	    (nvp->nvp_type == SATA_DTYPE_UNKNOWN)) {
1418
1419		NVLOG(NVDBG_ERRS, nvc, nvp,
1420		    "nv_sata_start: nvp_type 0x%x", nvp->nvp_type);
1421		DTRACE_PROBE1(not_ready_nvp_type_h, int, nvp->nvp_type);
1422
1423		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1424		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1425		mutex_exit(&nvp->nvp_mutex);
1426
1427		return (SATA_TRAN_PORT_ERROR);
1428	}
1429
1430	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1431
1432		nv_cmn_err(CE_WARN, nvc, nvp,
1433		    "port multiplier not supported by controller");
1434
1435		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1436		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1437		mutex_exit(&nvp->nvp_mutex);
1438
1439		return (SATA_TRAN_CMD_UNSUPPORTED);
1440	}
1441
1442	/*
1443	 * after a device reset, and then when sata module restore processing
1444	 * is complete, the sata module will set sata_clear_dev_reset which
1445	 * indicates that restore processing has completed and normal
1446	 * non-restore related commands should be processed.
1447	 */
1448	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1449
1450		NVLOG(NVDBG_RESET, nvc, nvp,
1451		    "nv_sata_start: clearing NV_RESTORE", NULL);
1452		DTRACE_PROBE(clearing_restore_p);
1453		DTRACE_PROBE1(nvp_state_before_clear_h, int, nvp->nvp_state);
1454
1455		nvp->nvp_state &= ~NV_RESTORE;
1456	}
1457
1458	/*
1459	 * if the device was recently reset as indicated by NV_RESTORE,
1460	 * only allow commands which restore device state.  The sata module
1461	 * marks such commands with sata_ignore_dev_reset.
1462	 *
1463	 * during coredump, nv_reset is called but the restore isn't
1464	 * processed, so ignore the wait for restore if the system
1465	 * is panicing.
1466	 */
1467	if ((nvp->nvp_state & NV_RESTORE) &&
1468	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1469	    (ddi_in_panic() == 0)) {
1470
1471		NVLOG(NVDBG_RESET, nvc, nvp,
1472		    "nv_sata_start: waiting for restore ", NULL);
1473		DTRACE_PROBE1(restore_no_ignore_reset_nvp_state_h,
1474		    int, nvp->nvp_state);
1475
1476		spkt->satapkt_reason = SATA_PKT_BUSY;
1477		mutex_exit(&nvp->nvp_mutex);
1478
1479		return (SATA_TRAN_BUSY);
1480	}
1481
1482	if (nvp->nvp_state & NV_ABORTING) {
1483
1484		NVLOG(NVDBG_ERRS, nvc, nvp,
1485		    "nv_sata_start: NV_ABORTING", NULL);
1486		DTRACE_PROBE1(aborting_nvp_state_h, int, nvp->nvp_state);
1487
1488		spkt->satapkt_reason = SATA_PKT_BUSY;
1489		mutex_exit(&nvp->nvp_mutex);
1490
1491		return (SATA_TRAN_BUSY);
1492	}
1493
1494	/*
1495	 * record command sequence for debugging.
1496	 */
1497	nvp->nvp_seq++;
1498
1499	DTRACE_PROBE2(command_start, int *, nvp, int,
1500	    spkt->satapkt_cmd.satacmd_cmd_reg);
1501
1502	/*
1503	 * clear SError to be able to check errors after the command failure
1504	 */
1505	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1506
1507	if (spkt->satapkt_op_mode &
1508	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1509
1510		ret = nv_start_sync(nvp, spkt);
1511
1512		mutex_exit(&nvp->nvp_mutex);
1513
1514		return (ret);
1515	}
1516
1517	/*
1518	 * start command asynchronous command
1519	 */
1520	ret = nv_start_async(nvp, spkt);
1521
1522	mutex_exit(&nvp->nvp_mutex);
1523
1524	return (ret);
1525}
1526
1527
1528/*
1529 * SATA_OPMODE_POLLING implies the driver is in a
1530 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1531 * If only SATA_OPMODE_SYNCH is set, the driver can use
1532 * interrupts and sleep wait on a cv.
1533 *
1534 * If SATA_OPMODE_POLLING is set, the driver can't use
1535 * interrupts and must busy wait and simulate the
1536 * interrupts by waiting for BSY to be cleared.
1537 *
1538 * Synchronous mode has to return BUSY if there are
1539 * any other commands already on the drive.
1540 */
1541static int
1542nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1543{
1544	nv_ctl_t *nvc = nvp->nvp_ctlp;
1545	int ret;
1546
1547	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1548	    NULL);
1549
1550	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1551		spkt->satapkt_reason = SATA_PKT_BUSY;
1552		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1553		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1554		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1555		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1556		    (&(nvp->nvp_slot[0]))->nvslot_spkt);
1557
1558		return (SATA_TRAN_BUSY);
1559	}
1560
1561	/*
1562	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1563	 */
1564	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1565	    servicing_interrupt()) {
1566		spkt->satapkt_reason = SATA_PKT_BUSY;
1567		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1568		    "SYNC mode not allowed during interrupt", NULL);
1569
1570		return (SATA_TRAN_BUSY);
1571
1572	}
1573
1574	/*
1575	 * disable interrupt generation if in polled mode
1576	 */
1577	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1578		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1579	}
1580
1581	/*
1582	 * overload the satapkt_reason with BUSY so code below
1583	 * will know when it's done
1584	 */
1585	spkt->satapkt_reason = SATA_PKT_BUSY;
1586
1587	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1588		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1589			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1590		}
1591
1592		return (ret);
1593	}
1594
1595	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1596		mutex_exit(&nvp->nvp_mutex);
1597		ret = nv_poll_wait(nvp, spkt);
1598		mutex_enter(&nvp->nvp_mutex);
1599
1600		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1601
1602		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1603		    " done % reason %d", ret);
1604
1605		return (ret);
1606	}
1607
1608	/*
1609	 * non-polling synchronous mode handling.  The interrupt will signal
1610	 * when device IO is completed.
1611	 */
1612	while (spkt->satapkt_reason == SATA_PKT_BUSY) {
1613		cv_wait(&nvp->nvp_sync_cv, &nvp->nvp_mutex);
1614	}
1615
1616
1617	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1618	    " done % reason %d", spkt->satapkt_reason);
1619
1620	return (SATA_TRAN_ACCEPTED);
1621}
1622
1623
1624static int
1625nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1626{
1627	int ret;
1628	nv_ctl_t *nvc = nvp->nvp_ctlp;
1629#if ! defined(__lock_lint)
1630	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1631#endif
1632
1633	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);
1634
1635	for (;;) {
1636
1637		NV_DELAY_NSEC(400);
1638
1639		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
1640		    NULL);
1641		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1642		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1643			mutex_enter(&nvp->nvp_mutex);
1644			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1645			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1646			nv_reset(nvp, "poll_wait");
1647			nv_complete_io(nvp, spkt, 0);
1648			mutex_exit(&nvp->nvp_mutex);
1649			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1650			    "SATA_STATUS_BSY", NULL);
1651
1652			return (SATA_TRAN_ACCEPTED);
1653		}
1654
1655		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
1656		    NULL);
1657
1658		/*
1659		 * Simulate interrupt.
1660		 */
1661		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1662		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
1663		    NULL);
1664
1665		if (ret != DDI_INTR_CLAIMED) {
1666			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1667			    " unclaimed -- resetting", NULL);
1668			mutex_enter(&nvp->nvp_mutex);
1669			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1670			nv_reset(nvp, "poll_wait intr not claimed");
1671			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1672			nv_complete_io(nvp, spkt, 0);
1673			mutex_exit(&nvp->nvp_mutex);
1674
1675			return (SATA_TRAN_ACCEPTED);
1676		}
1677
1678#if ! defined(__lock_lint)
1679		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1680			/*
1681			 * packet is complete
1682			 */
1683			return (SATA_TRAN_ACCEPTED);
1684		}
1685#endif
1686	}
1687	/*NOTREACHED*/
1688}
1689
1690
1691/*
1692 * Called by sata module to abort outstanding packets.
1693 */
1694/*ARGSUSED*/
1695static int
1696nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1697{
1698	int cport = spkt->satapkt_device.satadev_addr.cport;
1699	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1700	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1701	int c_a, ret;
1702
1703	ASSERT(cport < NV_MAX_PORTS(nvc));
1704	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1705
1706	mutex_enter(&nvp->nvp_mutex);
1707
1708	if (nvp->nvp_state & NV_DEACTIVATED) {
1709		mutex_exit(&nvp->nvp_mutex);
1710		nv_cmn_err(CE_WARN, nvc, nvp,
1711		    "abort request failed: port inactive");
1712
1713		return (SATA_FAILURE);
1714	}
1715
1716	/*
1717	 * spkt == NULL then abort all commands
1718	 */
1719	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1720
1721	if (c_a) {
1722		NVLOG(NVDBG_ENTRY, nvc, nvp,
1723		    "packets aborted running=%d", c_a);
1724		ret = SATA_SUCCESS;
1725	} else {
1726		if (spkt == NULL) {
1727			NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1728		} else {
1729			NVLOG(NVDBG_ENTRY, nvc, nvp,
1730			    "can't find spkt to abort", NULL);
1731		}
1732		ret = SATA_FAILURE;
1733	}
1734
1735	mutex_exit(&nvp->nvp_mutex);
1736
1737	return (ret);
1738}
1739
1740
1741/*
1742 * if spkt == NULL abort all pkts running, otherwise
1743 * abort the requested packet.  must be called with nv_mutex
1744 * held and returns with it held.  Not NCQ aware.
1745 */
1746static int
1747nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
1748    boolean_t reset)
1749{
1750	int aborted = 0, i, reset_once = B_FALSE;
1751	struct nv_slot *nv_slotp;
1752	sata_pkt_t *spkt_slot;
1753
1754	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1755
1756	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);
1757
1758	nvp->nvp_state |= NV_ABORTING;
1759
1760	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1761
1762		nv_slotp = &(nvp->nvp_slot[i]);
1763		spkt_slot = nv_slotp->nvslot_spkt;
1764
1765		/*
1766		 * skip if not active command in slot
1767		 */
1768		if (spkt_slot == NULL) {
1769			continue;
1770		}
1771
1772		/*
1773		 * if a specific packet was requested, skip if
1774		 * this is not a match
1775		 */
1776		if ((spkt != NULL) && (spkt != spkt_slot)) {
1777			continue;
1778		}
1779
1780		/*
1781		 * stop the hardware.  This could need reworking
1782		 * when NCQ is enabled in the driver.
1783		 */
1784		if (reset_once == B_FALSE) {
1785			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1786
1787			/*
1788			 * stop DMA engine
1789			 */
1790			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1791
1792			/*
1793			 * Reset only if explicitly specified by the arg reset
1794			 */
1795			if (reset == B_TRUE) {
1796				reset_once = B_TRUE;
1797				nv_reset(nvp, "abort_active");
1798			}
1799		}
1800
1801		spkt_slot->satapkt_reason = abort_reason;
1802		nv_complete_io(nvp, spkt_slot, i);
1803		aborted++;
1804	}
1805
1806	nvp->nvp_state &= ~NV_ABORTING;
1807
1808	return (aborted);
1809}
1810
1811
1812/*
1813 * Called by sata module to reset a port, device, or the controller.
1814 */
1815static int
1816nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1817{
1818	int cport = sd->satadev_addr.cport;
1819	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1820	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1821	int ret = SATA_FAILURE;
1822
1823	ASSERT(cport < NV_MAX_PORTS(nvc));
1824
1825	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);
1826
1827	mutex_enter(&nvp->nvp_mutex);
1828
1829	switch (sd->satadev_addr.qual) {
1830
1831	case SATA_ADDR_CPORT:
1832		/*FALLTHROUGH*/
1833	case SATA_ADDR_DCPORT:
1834
1835		ret = SATA_SUCCESS;
1836
1837		/*
1838		 * If a reset is already in progress, don't disturb it
1839		 */
1840		if ((nvp->nvp_state & (NV_RESET|NV_RESTORE)) &&
1841		    (ddi_in_panic() == 0)) {
1842			NVLOG(NVDBG_RESET, nvc, nvp,
1843			    "nv_sata_reset: reset already in progress", NULL);
1844			DTRACE_PROBE(reset_already_in_progress_p);
1845
1846			break;
1847		}
1848
1849		/*
1850		 * log the pre-reset state of the driver because dumping the
1851		 * blocks will disturb it.
1852		 */
1853		if (ddi_in_panic() == 1) {
1854			NVLOG(NVDBG_RESET, nvc, nvp, "in_panic.  nvp_state: "
1855			    "0x%x nvp_reset_time: %d nvp_last_cmd: 0x%x "
1856			    "nvp_previous_cmd: 0x%x nvp_reset_count: %d "
1857			    "nvp_first_reset_reason: %s "
1858			    "nvp_reset_reason: %s nvp_seq: %d "
1859			    "in_interrupt: %d", nvp->nvp_state,
1860			    nvp->nvp_reset_time, nvp->nvp_last_cmd,
1861			    nvp->nvp_previous_cmd, nvp->nvp_reset_count,
1862			    nvp->nvp_first_reset_reason,
1863			    nvp->nvp_reset_reason, nvp->nvp_seq,
1864			    servicing_interrupt());
1865		}
1866
1867		nv_reset(nvp, "sata_reset");
1868
1869		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1870
1871		/*
1872		 * If the port is inactive, do a quiet reset and don't attempt
1873		 * to wait for reset completion or do any post reset processing
1874		 *
1875		 */
1876		if (nvp->nvp_state & NV_DEACTIVATED) {
1877			nvp->nvp_state &= ~NV_RESET;
1878			nvp->nvp_reset_time = 0;
1879
1880			break;
1881		}
1882
1883		/*
1884		 * clear the port failed flag.  It will get set again
1885		 * if the port is still not functioning.
1886		 */
1887		nvp->nvp_state &= ~NV_FAILED;
1888
1889		/*
1890		 * timeouts are not available while the system is
1891		 * dropping core, so call nv_monitor_reset() directly
1892		 */
1893		if (ddi_in_panic() != 0) {
1894			while (nvp->nvp_state & NV_RESET) {
1895				drv_usecwait(1000);
1896				(void) nv_monitor_reset(nvp);
1897			}
1898
1899			break;
1900		}
1901
1902		break;
1903	case SATA_ADDR_CNTRL:
1904		NVLOG(NVDBG_ENTRY, nvc, nvp,
1905		    "nv_sata_reset: controller reset not supported", NULL);
1906
1907		break;
1908	case SATA_ADDR_PMPORT:
1909	case SATA_ADDR_DPMPORT:
1910		NVLOG(NVDBG_ENTRY, nvc, nvp,
1911		    "nv_sata_reset: port multipliers not supported", NULL);
1912		/*FALLTHROUGH*/
1913	default:
1914		/*
1915		 * unsupported case
1916		 */
1917		break;
1918	}
1919
1920	mutex_exit(&nvp->nvp_mutex);
1921
1922	return (ret);
1923}
1924
1925
1926/*
1927 * Sata entry point to handle port activation.  cfgadm -c connect
1928 */
1929static int
1930nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1931{
1932	int cport = sd->satadev_addr.cport;
1933	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1934	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1935	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1936	uint32_t sstatus;
1937
1938	ASSERT(cport < NV_MAX_PORTS(nvc));
1939	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1940
1941	mutex_enter(&nvp->nvp_mutex);
1942
1943	sd->satadev_state = SATA_STATE_READY;
1944
1945	nv_copy_registers(nvp, sd, NULL);
1946
1947	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1948
1949	/*
1950	 * initiate link probing and device signature acquisition
1951	 */
1952
1953	bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1954
1955	sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
1956
1957	nvp->nvp_type = SATA_DTYPE_NONE;
1958	nvp->nvp_signature = NV_NO_SIG;
1959	nvp->nvp_state &= ~NV_DEACTIVATED;
1960
1961	if (SSTATUS_GET_DET(sstatus) ==
1962	    SSTATUS_DET_DEVPRE_PHYCOM) {
1963
1964		nvp->nvp_state |= NV_ATTACH;
1965		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1966		nv_reset(nvp, "sata_activate");
1967
1968		while (nvp->nvp_state & NV_RESET) {
1969			cv_wait(&nvp->nvp_reset_cv, &nvp->nvp_mutex);
1970		}
1971
1972	}
1973
1974	mutex_exit(&nvp->nvp_mutex);
1975
1976	return (SATA_SUCCESS);
1977}
1978
1979
1980/*
1981 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1982 */
1983static int
1984nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1985{
1986	int cport = sd->satadev_addr.cport;
1987	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1988	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1989
1990	ASSERT(cport < NV_MAX_PORTS(nvc));
1991	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);
1992
1993	mutex_enter(&nvp->nvp_mutex);
1994
1995	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1996
1997	/*
1998	 * make the device inaccessible
1999	 */
2000	nvp->nvp_state |= NV_DEACTIVATED;
2001
2002	/*
2003	 * disable the interrupts on port
2004	 */
2005	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2006
2007	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
2008	nv_copy_registers(nvp, sd, NULL);
2009
2010	mutex_exit(&nvp->nvp_mutex);
2011
2012	return (SATA_SUCCESS);
2013}
2014
2015
2016/*
2017 * find an empty slot in the driver's queue, increment counters,
2018 * and then invoke the appropriate PIO or DMA start routine.
2019 */
2020static int
2021nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
2022{
2023	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
2024	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
2025	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2026	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
2027	nv_ctl_t *nvc = nvp->nvp_ctlp;
2028	nv_slot_t *nv_slotp;
2029	boolean_t dma_cmd;
2030
2031	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
2032	    sata_cmdp->satacmd_cmd_reg);
2033
2034	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
2035	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
2036		nvp->nvp_ncq_run++;
2037		/*
2038		 * search for an empty NCQ slot.  by the time, it's already
2039		 * been determined by the caller that there is room on the
2040		 * queue.
2041		 */
2042		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
2043		    on_bit <<= 1) {
2044			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
2045				break;
2046			}
2047		}
2048
2049		/*
2050		 * the first empty slot found, should not exceed the queue
2051		 * depth of the drive.  if it does it's an error.
2052		 */
2053		ASSERT(slot != nvp->nvp_queue_depth);
2054
2055		sactive = nv_get32(nvc->nvc_bar_hdl[5],
2056		    nvp->nvp_sactive);
2057		ASSERT((sactive & on_bit) == 0);
2058		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
2059		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
2060		    on_bit);
2061		nvp->nvp_sactive_cache |= on_bit;
2062
2063		ncq = NVSLOT_NCQ;
2064
2065	} else {
2066		nvp->nvp_non_ncq_run++;
2067		slot = 0;
2068	}
2069
2070	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
2071
2072	ASSERT(nv_slotp->nvslot_spkt == NULL);
2073
2074	nv_slotp->nvslot_spkt = spkt;
2075	nv_slotp->nvslot_flags = ncq;
2076
2077	/*
2078	 * the sata module doesn't indicate which commands utilize the
2079	 * DMA engine, so find out using this switch table.
2080	 */
2081	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
2082	case SATAC_READ_DMA_EXT:
2083	case SATAC_WRITE_DMA_EXT:
2084	case SATAC_WRITE_DMA:
2085	case SATAC_READ_DMA:
2086	case SATAC_READ_DMA_QUEUED:
2087	case SATAC_READ_DMA_QUEUED_EXT:
2088	case SATAC_WRITE_DMA_QUEUED:
2089	case SATAC_WRITE_DMA_QUEUED_EXT:
2090	case SATAC_READ_FPDMA_QUEUED:
2091	case SATAC_WRITE_FPDMA_QUEUED:
2092	case SATAC_DSM:
2093		dma_cmd = B_TRUE;
2094		break;
2095	default:
2096		dma_cmd = B_FALSE;
2097	}
2098
2099	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
2100		NVLOG(NVDBG_DELIVER, nvc,  nvp, "DMA command", NULL);
2101		nv_slotp->nvslot_start = nv_start_dma;
2102		nv_slotp->nvslot_intr = nv_intr_dma;
2103	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
2104		NVLOG(NVDBG_DELIVER, nvc,  nvp, "packet command", NULL);
2105		nv_slotp->nvslot_start = nv_start_pkt_pio;
2106		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
2107		if ((direction == SATA_DIR_READ) ||
2108		    (direction == SATA_DIR_WRITE)) {
2109			nv_slotp->nvslot_byte_count =
2110			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2111			nv_slotp->nvslot_v_addr =
2112			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2113			/*
2114			 * Freeing DMA resources allocated by the sata common
2115			 * module to avoid buffer overwrite (dma sync) problems
2116			 * when the buffer is released at command completion.
2117			 * Primarily an issue on systems with more than
2118			 * 4GB of memory.
2119			 */
2120			sata_free_dma_resources(spkt);
2121		}
2122	} else if (direction == SATA_DIR_NODATA_XFER) {
2123		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
2124		nv_slotp->nvslot_start = nv_start_nodata;
2125		nv_slotp->nvslot_intr = nv_intr_nodata;
2126	} else if (direction == SATA_DIR_READ) {
2127		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
2128		nv_slotp->nvslot_start = nv_start_pio_in;
2129		nv_slotp->nvslot_intr = nv_intr_pio_in;
2130		nv_slotp->nvslot_byte_count =
2131		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2132		nv_slotp->nvslot_v_addr =
2133		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2134		/*
2135		 * Freeing DMA resources allocated by the sata common module to
2136		 * avoid buffer overwrite (dma sync) problems when the buffer
2137		 * is released at command completion.  This is not an issue
2138		 * for write because write does not update the buffer.
2139		 * Primarily an issue on systems with more than 4GB of memory.
2140		 */
2141		sata_free_dma_resources(spkt);
2142	} else if (direction == SATA_DIR_WRITE) {
2143		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
2144		nv_slotp->nvslot_start = nv_start_pio_out;
2145		nv_slotp->nvslot_intr = nv_intr_pio_out;
2146		nv_slotp->nvslot_byte_count =
2147		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2148		nv_slotp->nvslot_v_addr =
2149		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2150	} else {
2151		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2152		    " %d cookies %d cmd %x",
2153		    sata_cmdp->satacmd_flags.sata_data_direction,
2154		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2155		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2156		ret = SATA_TRAN_CMD_UNSUPPORTED;
2157
2158		goto fail;
2159	}
2160
2161	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2162	    SATA_TRAN_ACCEPTED) {
2163#ifdef SGPIO_SUPPORT
2164		nv_sgp_drive_active(nvp->nvp_ctlp,
2165		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2166#endif
2167		nv_slotp->nvslot_stime = ddi_get_lbolt();
2168
2169		/*
2170		 * start timer if it's not already running and this packet
2171		 * is not requesting polled mode.
2172		 */
2173		if ((nvp->nvp_timeout_id == 0) &&
2174		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2175			nv_setup_timeout(nvp, NV_ONE_SEC);
2176		}
2177
2178		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2179		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2180
2181		return (SATA_TRAN_ACCEPTED);
2182	}
2183
2184	fail:
2185
2186	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2187
2188	if (ncq == NVSLOT_NCQ) {
2189		nvp->nvp_ncq_run--;
2190		nvp->nvp_sactive_cache &= ~on_bit;
2191	} else {
2192		nvp->nvp_non_ncq_run--;
2193	}
2194	nv_slotp->nvslot_spkt = NULL;
2195	nv_slotp->nvslot_flags = 0;
2196
2197	return (ret);
2198}
2199
2200
2201/*
2202 * Check if the signature is ready and if non-zero translate
2203 * it into a solaris sata defined type.
2204 */
2205static void
2206nv_read_signature(nv_port_t *nvp)
2207{
2208	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2209	int retry_count = 0;
2210
2211	retry:
2212
2213	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2214	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2215	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2216	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2217
2218	NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2219	    "nv_read_signature: 0x%x ", nvp->nvp_signature);
2220
2221	switch (nvp->nvp_signature) {
2222
2223	case NV_DISK_SIG:
2224		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2225		DTRACE_PROBE(signature_is_disk_device_p)
2226		nvp->nvp_type = SATA_DTYPE_ATADISK;
2227
2228		break;
2229	case NV_ATAPI_SIG:
2230		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2231		    "drive is an optical device", NULL);
2232		DTRACE_PROBE(signature_is_optical_device_p)
2233		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2234		break;
2235	case NV_PM_SIG:
2236		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2237		    "device is a port multiplier", NULL);
2238		DTRACE_PROBE(signature_is_port_multiplier_p)
2239		nvp->nvp_type = SATA_DTYPE_PMULT;
2240		break;
2241	case NV_NO_SIG:
2242		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2243		    "signature not available", NULL);
2244		DTRACE_PROBE(sig_not_available_p);
2245		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2246		break;
2247	default:
2248		if (retry_count++ == 0) {
2249			/*
2250			 * this is a rare corner case where the controller
2251			 * is updating the task file registers as the driver
2252			 * is reading them.  If this happens, wait a bit and
2253			 * retry once.
2254			 */
2255			NV_DELAY_NSEC(1000000);
2256			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2257			    "invalid signature 0x%x retry once",
2258			    nvp->nvp_signature);
2259			DTRACE_PROBE1(signature_invalid_retry_once_h,
2260			    int, nvp->nvp_signature);
2261
2262			goto retry;
2263		}
2264
2265		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
2266		    "invalid signature 0x%x", nvp->nvp_signature);
2267		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2268
2269		break;
2270	}
2271}
2272
2273
2274/*
2275 * Set up a new timeout or complete a timeout in microseconds.
2276 * If microseconds is zero, no new timeout is scheduled.  Must be
2277 * called at the end of the timeout routine.
2278 */
2279static void
2280nv_setup_timeout(nv_port_t *nvp, clock_t microseconds)
2281{
2282	clock_t old_duration = nvp->nvp_timeout_duration;
2283
2284	if (microseconds == 0) {
2285
2286		return;
2287	}
2288
2289	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2290		/*
2291		 * Since we are dropping the mutex for untimeout,
2292		 * the timeout may be executed while we are trying to
2293		 * untimeout and setting up a new timeout.
2294		 * If nvp_timeout_duration is 0, then this function
2295		 * was re-entered. Just exit.
2296		 */
2297		cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2298
2299		return;
2300	}
2301
2302	nvp->nvp_timeout_duration = 0;
2303
2304	if (nvp->nvp_timeout_id == 0) {
2305		/*
2306		 * start new timer
2307		 */
2308		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2309		    drv_usectohz(microseconds));
2310	} else {
2311		/*
2312		 * If the currently running timeout is due later than the
2313		 * requested one, restart it with a new expiration.
2314		 * Our timeouts do not need to be accurate - we would be just
2315		 * checking that the specified time was exceeded.
2316		 */
2317		if (old_duration > microseconds) {
2318			mutex_exit(&nvp->nvp_mutex);
2319			(void) untimeout(nvp->nvp_timeout_id);
2320			mutex_enter(&nvp->nvp_mutex);
2321			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2322			    drv_usectohz(microseconds));
2323		}
2324	}
2325
2326	nvp->nvp_timeout_duration = microseconds;
2327}
2328
2329
2330
2331int nv_reset_length = NV_RESET_LENGTH;
2332
2333/*
2334 * Reset the port
2335 */
2336static void
2337nv_reset(nv_port_t *nvp, char *reason)
2338{
2339	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2340	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2341	nv_ctl_t *nvc = nvp->nvp_ctlp;
2342	uint32_t sctrl, serr, sstatus;
2343	uint8_t bmicx;
2344	int i, j;
2345	boolean_t reset_success = B_FALSE;
2346
2347	ASSERT(mutex_owned(&nvp->nvp_mutex));
2348
2349	/*
2350	 * If the port is reset right after the controller receives
2351	 * the DMA activate command (or possibly any other FIS),
2352	 * controller operation freezes without any known recovery
2353	 * procedure.  Until Nvidia advises on a recovery mechanism,
2354	 * avoid the situation by waiting sufficiently long to
2355	 * ensure the link is not actively transmitting any FIS.
2356	 * 100ms was empirically determined to be large enough to
2357	 * ensure no transaction was left in flight but not too long
2358	 * as to cause any significant thread delay.
2359	 */
2360	drv_usecwait(100000);
2361
2362	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2363	DTRACE_PROBE1(serror_h, int, serr);
2364
2365	/*
2366	 * stop DMA engine.
2367	 */
2368	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2369	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2370
2371	/*
2372	 * the current setting of the NV_RESET in nvp_state indicates whether
2373	 * this is the first reset attempt or a retry.
2374	 */
2375	if (nvp->nvp_state & NV_RESET) {
2376		nvp->nvp_reset_retry_count++;
2377
2378		NVLOG(NVDBG_RESET, nvc, nvp, "npv_reset_retry_count: %d",
2379		    nvp->nvp_reset_retry_count);
2380
2381	} else {
2382		nvp->nvp_reset_retry_count = 0;
2383		nvp->nvp_reset_count++;
2384		nvp->nvp_state |= NV_RESET;
2385
2386		NVLOG(NVDBG_RESET, nvc, nvp, "nvp_reset_count: %d reason: %s "
2387		    "serror: 0x%x seq: %d run: %d cmd: 0x%x",
2388		    nvp->nvp_reset_count, reason, serr, nvp->nvp_seq,
2389		    nvp->nvp_non_ncq_run, nvp->nvp_last_cmd);
2390	}
2391
2392	/*
2393	 * a link event could have occurred slightly before excessive
2394	 * interrupt processing invokes a reset.  Reset handling overrides
2395	 * link event processing so it's safe to clear it here.
2396	 */
2397	nvp->nvp_state &= ~(NV_RESTORE|NV_LINK_EVENT);
2398
2399	nvp->nvp_reset_time = ddi_get_lbolt();
2400
2401	if ((nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) == 0) {
2402		nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x"
2403		    " nvp_state: 0x%x", reason, serr, nvp->nvp_state);
2404		/*
2405		 * keep a record of why the first reset occurred, for debugging
2406		 */
2407		if (nvp->nvp_first_reset_reason[0] == '\0') {
2408			(void) strncpy(nvp->nvp_first_reset_reason,
2409			    reason, NV_REASON_LEN);
2410			nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2411		}
2412	}
2413
2414	(void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2415
2416	/*
2417	 * ensure there is terminating NULL
2418	 */
2419	nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2420
2421	/*
2422	 * Issue hardware reset; retry if necessary.
2423	 */
2424	for (i = 0; i < NV_COMRESET_ATTEMPTS; i++) {
2425
2426		/*
2427		 * clear signature registers and the error register too
2428		 */
2429		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2430		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2431		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2432		nv_put8(cmdhdl, nvp->nvp_count, 0);
2433
2434		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2435
2436		/*
2437		 * assert reset in PHY by writing a 1 to bit 0 scontrol
2438		 */
2439		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2440
2441		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2442		    sctrl | SCONTROL_DET_COMRESET);
2443
2444		/* Wait at least 1ms, as required by the spec */
2445		drv_usecwait(nv_reset_length);
2446
2447		serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2448		DTRACE_PROBE1(aftercomreset_serror_h, int, serr);
2449
2450		/* Reset all accumulated error bits */
2451		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2452
2453
2454		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2455		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2456		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2457		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2458
2459		/* de-assert reset in PHY */
2460		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2461		    sctrl & ~SCONTROL_DET_COMRESET);
2462
2463		/*
2464		 * Wait up to 10ms for COMINIT to arrive, indicating that
2465		 * the device recognized COMRESET.
2466		 */
2467		for (j = 0; j < 10; j++) {
2468			drv_usecwait(NV_ONE_MSEC);
2469			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2470			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2471			    (SSTATUS_GET_DET(sstatus) ==
2472			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2473				reset_success = B_TRUE;
2474				break;
2475			}
2476		}
2477
2478		if (reset_success == B_TRUE)
2479			break;
2480	}
2481
2482
2483	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2484	DTRACE_PROBE1(last_serror_h, int, serr);
2485
2486	if (reset_success == B_FALSE) {
2487		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2488		    "after %d attempts. serr: 0x%x", i, serr);
2489	} else {
2490		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded"
2491		    " after %dms. serr: 0x%x", TICK_TO_MSEC(ddi_get_lbolt() -
2492		    nvp->nvp_reset_time), serr);
2493	}
2494
2495	nvp->nvp_wait_sig  = NV_WAIT_SIG;
2496	nv_setup_timeout(nvp, nvp->nvp_wait_sig);
2497}
2498
2499
2500/*
2501 * Initialize register handling specific to mcp51/mcp55/mcp61
2502 */
2503/* ARGSUSED */
2504static void
2505mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2506{
2507	nv_port_t *nvp;
2508	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2509	uint8_t off, port;
2510
2511	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2512	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2513
2514	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2515		nvp = &(nvc->nvc_port[port]);
2516		nvp->nvp_mcp5x_int_status =
2517		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2518		nvp->nvp_mcp5x_int_ctl =
2519		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2520
2521		/*
2522		 * clear any previous interrupts asserted
2523		 */
2524		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2525		    MCP5X_INT_CLEAR);
2526
2527		/*
2528		 * These are the interrupts to accept for now.  The spec
2529		 * says these are enable bits, but nvidia has indicated
2530		 * these are masking bits.  Even though they may be masked
2531		 * out to prevent asserting the main interrupt, they can
2532		 * still be asserted while reading the interrupt status
2533		 * register, so that needs to be considered in the interrupt
2534		 * handler.
2535		 */
2536		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2537		    ~(MCP5X_INT_IGNORE));
2538	}
2539
2540	/*
2541	 * Allow the driver to program the BM on the first command instead
2542	 * of waiting for an interrupt.
2543	 */
2544#ifdef NCQ
2545	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2546	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2547	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2548	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2549#endif
2550
2551	/*
2552	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2553	 * Enable DMA to take advantage of that.
2554	 *
2555	 */
2556	if ((nvc->nvc_devid > 0x37f) ||
2557	    ((nvc->nvc_devid == 0x37f) && (nvc->nvc_revid >= 0xa3))) {
2558		if (nv_sata_40bit_dma == B_TRUE) {
2559			uint32_t reg32;
2560			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2561			    "devid is %X revid is %X. 40-bit DMA"
2562			    " addressing enabled", nvc->nvc_devid,
2563			    nvc->nvc_revid);
2564			nvc->dma_40bit = B_TRUE;
2565
2566			reg32 = pci_config_get32(pci_conf_handle,
2567			    NV_SATA_CFG_20);
2568			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2569			    reg32 | NV_40BIT_PRD);
2570
2571			/*
2572			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2573			 * bits) for the primary PRD table, and bits 8-15
2574			 * contain the top 8 bits for the secondary.  Set
2575			 * to zero because the DMA attribute table for PRD
2576			 * allocation forces it into 32 bit address space
2577			 * anyway.
2578			 */
2579			reg32 = pci_config_get32(pci_conf_handle,
2580			    NV_SATA_CFG_23);
2581			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2582			    reg32 & 0xffff0000);
2583		} else {
2584			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2585			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
2586		}
2587	} else {
2588		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "devid is %X revid is"
2589		    " %X. Not capable of 40-bit DMA addressing",
2590		    nvc->nvc_devid, nvc->nvc_revid);
2591	}
2592}
2593
2594
2595/*
2596 * Initialize register handling specific to ck804
2597 */
2598static void
2599ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2600{
2601	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2602	uint32_t reg32;
2603	uint16_t reg16;
2604	nv_port_t *nvp;
2605	int j;
2606
2607	/*
2608	 * delay hotplug interrupts until PHYRDY.
2609	 */
2610	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2611	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2612	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2613
2614	/*
2615	 * enable hot plug interrupts for channel x and y
2616	 */
2617	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2618	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2619	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2620	    NV_HIRQ_EN | reg16);
2621
2622
2623	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2624	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2625	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2626	    NV_HIRQ_EN | reg16);
2627
2628	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2629
2630	/*
2631	 * clear any existing interrupt pending then enable
2632	 */
2633	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2634		nvp = &(nvc->nvc_port[j]);
2635		mutex_enter(&nvp->nvp_mutex);
2636		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2637		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2638		mutex_exit(&nvp->nvp_mutex);
2639	}
2640}
2641
2642
2643/*
2644 * Initialize the controller and set up driver data structures.
2645 * determine if ck804 or mcp5x class.
2646 */
2647static int
2648nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2649{
2650	struct sata_hba_tran stran;
2651	nv_port_t *nvp;
2652	int j;
2653	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2654	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2655	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2656	uint32_t reg32;
2657	uint8_t reg8, reg8_save;
2658
2659	NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2660
2661	nvc->nvc_mcp5x_flag = B_FALSE;
2662
2663	/*
2664	 * Need to set bit 2 to 1 at config offset 0x50
2665	 * to enable access to the bar5 registers.
2666	 */
2667	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2668	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2669		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2670		    reg32 | NV_BAR5_SPACE_EN);
2671	}
2672
2673	/*
2674	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2675	 * task file registers into bar5 while mcp5x won't.  The offset of
2676	 * the task file registers in mcp5x's space is unused, so it will
2677	 * return zero.  So check one of the task file registers to see if it is
2678	 * writable and reads back what was written.  If it's mcp5x it will
2679	 * return back 0xff whereas ck804 will return the value written.
2680	 */
2681	reg8_save = nv_get8(bar5_hdl,
2682	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2683
2684
2685	for (j = 1; j < 3; j++) {
2686
2687		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2688		reg8 = nv_get8(bar5_hdl,
2689		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2690
2691		if (reg8 != j) {
2692			nvc->nvc_mcp5x_flag = B_TRUE;
2693			break;
2694		}
2695	}
2696
2697	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2698
2699	if (nvc->nvc_mcp5x_flag == B_FALSE) {
2700		NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804/MCP04",
2701		    NULL);
2702		nvc->nvc_interrupt = ck804_intr;
2703		nvc->nvc_reg_init = ck804_reg_init;
2704		nvc->nvc_set_intr = ck804_set_intr;
2705	} else {
2706		NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55/MCP61",
2707		    NULL);
2708		nvc->nvc_interrupt = mcp5x_intr;
2709		nvc->nvc_reg_init = mcp5x_reg_init;
2710		nvc->nvc_set_intr = mcp5x_set_intr;
2711	}
2712
2713
2714	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2715	stran.sata_tran_hba_dip = nvc->nvc_dip;
2716	stran.sata_tran_hba_num_cports = NV_NUM_PORTS;
2717	stran.sata_tran_hba_features_support =
2718	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2719	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2720	stran.sata_tran_probe_port = nv_sata_probe;
2721	stran.sata_tran_start = nv_sata_start;
2722	stran.sata_tran_abort = nv_sata_abort;
2723	stran.sata_tran_reset_dport = nv_sata_reset;
2724	stran.sata_tran_selftest = NULL;
2725	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2726	stran.sata_tran_pwrmgt_ops = NULL;
2727	stran.sata_tran_ioctl = NULL;
2728	nvc->nvc_sata_hba_tran = stran;
2729
2730	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2731	    KM_SLEEP);
2732
2733	/*
2734	 * initialize registers common to all chipsets
2735	 */
2736	nv_common_reg_init(nvc);
2737
2738	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2739		nvp = &(nvc->nvc_port[j]);
2740
2741		cmd_addr = nvp->nvp_cmd_addr;
2742		ctl_addr = nvp->nvp_ctl_addr;
2743		bm_addr = nvp->nvp_bm_addr;
2744
2745		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2746		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2747
2748		cv_init(&nvp->nvp_sync_cv, NULL, CV_DRIVER, NULL);
2749		cv_init(&nvp->nvp_reset_cv, NULL, CV_DRIVER, NULL);
2750
2751		nvp->nvp_data	= cmd_addr + NV_DATA;
2752		nvp->nvp_error	= cmd_addr + NV_ERROR;
2753		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2754		nvp->nvp_count	= cmd_addr + NV_COUNT;
2755		nvp->nvp_sect	= cmd_addr + NV_SECT;
2756		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2757		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2758		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2759		nvp->nvp_status	= cmd_addr + NV_STATUS;
2760		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2761		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2762		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2763
2764		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2765		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2766		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2767
2768		nvp->nvp_state = 0;
2769
2770		/*
2771		 * Initialize dma handles, etc.
2772		 * If it fails, the port is in inactive state.
2773		 */
2774		nv_init_port(nvp);
2775	}
2776
2777	/*
2778	 * initialize register by calling chip specific reg initialization
2779	 */
2780	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2781
2782	/* initialize the hba dma attribute */
2783	if (nvc->dma_40bit == B_TRUE)
2784		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2785		    &buffer_dma_40bit_attr;
2786	else
2787		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2788		    &buffer_dma_attr;
2789
2790	return (NV_SUCCESS);
2791}
2792
2793
2794/*
2795 * Initialize data structures with enough slots to handle queuing, if
2796 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2797 * NCQ support is built into the driver and enabled.  It might have been
2798 * better to derive the true size from the drive itself, but the sata
2799 * module only sends down that information on the first NCQ command,
2800 * which means possibly re-sizing the structures on an interrupt stack,
2801 * making error handling more messy.  The easy way is to just allocate
2802 * all 32 slots, which is what most drives support anyway.
2803 */
2804static void
2805nv_init_port(nv_port_t *nvp)
2806{
2807	nv_ctl_t *nvc = nvp->nvp_ctlp;
2808	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2809	dev_info_t *dip = nvc->nvc_dip;
2810	ddi_device_acc_attr_t dev_attr;
2811	size_t buf_size;
2812	ddi_dma_cookie_t cookie;
2813	uint_t count;
2814	int rc, i;
2815
2816	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2817	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2818	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2819
2820	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2821	    NV_QUEUE_SLOTS, KM_SLEEP);
2822
2823	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2824	    NV_QUEUE_SLOTS, KM_SLEEP);
2825
2826	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2827	    NV_QUEUE_SLOTS, KM_SLEEP);
2828
2829	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2830	    NV_QUEUE_SLOTS, KM_SLEEP);
2831
2832	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2833	    KM_SLEEP);
2834
2835	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2836
2837		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2838		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2839
2840		if (rc != DDI_SUCCESS) {
2841			nv_uninit_port(nvp);
2842
2843			return;
2844		}
2845
2846		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2847		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2848		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2849		    &(nvp->nvp_sg_acc_hdl[i]));
2850
2851		if (rc != DDI_SUCCESS) {
2852			nv_uninit_port(nvp);
2853
2854			return;
2855		}
2856
2857		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2858		    nvp->nvp_sg_addr[i], buf_size,
2859		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2860		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2861
2862		if (rc != DDI_DMA_MAPPED) {
2863			nv_uninit_port(nvp);
2864
2865			return;
2866		}
2867
2868		ASSERT(count == 1);
2869		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2870
2871		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2872
2873		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2874	}
2875
2876	/*
2877	 * nvp_queue_depth represents the actual drive queue depth, not the
2878	 * number of slots allocated in the structures (which may be more).
2879	 * Actual queue depth is only learned after the first NCQ command, so
2880	 * initialize it to 1 for now.
2881	 */
2882	nvp->nvp_queue_depth = 1;
2883
2884	/*
2885	 * Port is initialized whether the device is attached or not.
2886	 * Link processing and device identification will be started later,
2887	 * after interrupts are initialized.
2888	 */
2889	nvp->nvp_type = SATA_DTYPE_NONE;
2890}
2891
2892
2893/*
2894 * Free dynamically allocated structures for port.
2895 */
2896static void
2897nv_uninit_port(nv_port_t *nvp)
2898{
2899	int i;
2900
2901	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2902	    "nv_uninit_port uninitializing", NULL);
2903
2904#ifdef SGPIO_SUPPORT
2905	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2906		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2907		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2908	}
2909#endif
2910
2911	nvp->nvp_type = SATA_DTYPE_NONE;
2912
2913	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2914		if (nvp->nvp_sg_paddr[i]) {
2915			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2916		}
2917
2918		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2919			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2920		}
2921
2922		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2923			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2924		}
2925	}
2926
2927	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2928	nvp->nvp_slot = NULL;
2929
2930	kmem_free(nvp->nvp_sg_dma_hdl,
2931	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2932	nvp->nvp_sg_dma_hdl = NULL;
2933
2934	kmem_free(nvp->nvp_sg_acc_hdl,
2935	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2936	nvp->nvp_sg_acc_hdl = NULL;
2937
2938	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2939	nvp->nvp_sg_addr = NULL;
2940
2941	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2942	nvp->nvp_sg_paddr = NULL;
2943}
2944
2945
2946/*
2947 * Cache register offsets and access handles to frequently accessed registers
2948 * which are common to either chipset.
2949 */
2950static void
2951nv_common_reg_init(nv_ctl_t *nvc)
2952{
2953	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2954	uchar_t *bm_addr_offset, *sreg_offset;
2955	uint8_t bar, port;
2956	nv_port_t *nvp;
2957
2958	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2959		if (port == 0) {
2960			bar = NV_BAR_0;
2961			bm_addr_offset = 0;
2962			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2963		} else {
2964			bar = NV_BAR_2;
2965			bm_addr_offset = (uchar_t *)8;
2966			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2967		}
2968
2969		nvp = &(nvc->nvc_port[port]);
2970		nvp->nvp_ctlp = nvc;
2971		nvp->nvp_port_num = port;
2972		NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2973
2974		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2975		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2976		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2977		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2978		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2979		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2980		    (long)bm_addr_offset;
2981
2982		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2983		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2984		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2985		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2986	}
2987}
2988
2989
2990static void
2991nv_uninit_ctl(nv_ctl_t *nvc)
2992{
2993	int port;
2994	nv_port_t *nvp;
2995
2996	NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2997
2998	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2999		nvp = &(nvc->nvc_port[port]);
3000		mutex_enter(&nvp->nvp_mutex);
3001		NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
3002		nv_uninit_port(nvp);
3003		mutex_exit(&nvp->nvp_mutex);
3004		mutex_destroy(&nvp->nvp_mutex);
3005		cv_destroy(&nvp->nvp_sync_cv);
3006		cv_destroy(&nvp->nvp_reset_cv);
3007	}
3008
3009	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
3010	nvc->nvc_port = NULL;
3011}
3012
3013
3014/*
3015 * ck804 interrupt.  This is a wrapper around ck804_intr_process so
3016 * that interrupts from other devices can be disregarded while dtracing.
3017 */
3018/* ARGSUSED */
3019static uint_t
3020ck804_intr(caddr_t arg1, caddr_t arg2)
3021{
3022	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3023	uint8_t intr_status;
3024	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3025
3026	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3027		return (DDI_INTR_UNCLAIMED);
3028
3029	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3030
3031	if (intr_status == 0) {
3032
3033		return (DDI_INTR_UNCLAIMED);
3034	}
3035
3036	ck804_intr_process(nvc, intr_status);
3037
3038	return (DDI_INTR_CLAIMED);
3039}
3040
3041
3042/*
3043 * Main interrupt handler for ck804.  handles normal device
3044 * interrupts and hot plug and remove interrupts.
3045 *
3046 */
3047static void
3048ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
3049{
3050
3051	int port, i;
3052	nv_port_t *nvp;
3053	nv_slot_t *nv_slotp;
3054	uchar_t	status;
3055	sata_pkt_t *spkt;
3056	uint8_t bmstatus, clear_bits;
3057	ddi_acc_handle_t bmhdl;
3058	int nvcleared = 0;
3059	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3060	uint32_t sstatus;
3061	int port_mask_hot[] = {
3062		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3063	};
3064	int port_mask_pm[] = {
3065		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3066	};
3067
3068	NVLOG(NVDBG_INTR, nvc, NULL,
3069	    "ck804_intr_process entered intr_status=%x", intr_status);
3070
3071	/*
3072	 * For command completion interrupt, explicit clear is not required.
3073	 * however, for the error cases explicit clear is performed.
3074	 */
3075	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3076
3077		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3078
3079		if ((port_mask[port] & intr_status) == 0) {
3080
3081			continue;
3082		}
3083
3084		NVLOG(NVDBG_INTR, nvc, NULL,
3085		    "ck804_intr_process interrupt on port %d", port);
3086
3087		nvp = &(nvc->nvc_port[port]);
3088
3089		mutex_enter(&nvp->nvp_mutex);
3090
3091		/*
3092		 * this case might be encountered when the other port
3093		 * is active
3094		 */
3095		if (nvp->nvp_state & NV_DEACTIVATED) {
3096
3097			/*
3098			 * clear interrupt bits
3099			 */
3100			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3101			    port_mask[port]);
3102
3103			mutex_exit(&nvp->nvp_mutex);
3104
3105			continue;
3106		}
3107
3108
3109		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3110			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3111			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3112			    " no command in progress status=%x", status);
3113			mutex_exit(&nvp->nvp_mutex);
3114
3115			/*
3116			 * clear interrupt bits
3117			 */
3118			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3119			    port_mask[port]);
3120
3121			continue;
3122		}
3123
3124		bmhdl = nvp->nvp_bm_hdl;
3125		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3126
3127		if (!(bmstatus & BMISX_IDEINTS)) {
3128			mutex_exit(&nvp->nvp_mutex);
3129
3130			continue;
3131		}
3132
3133		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3134
3135		if (status & SATA_STATUS_BSY) {
3136			mutex_exit(&nvp->nvp_mutex);
3137
3138			continue;
3139		}
3140
3141		nv_slotp = &(nvp->nvp_slot[0]);
3142
3143		ASSERT(nv_slotp);
3144
3145		spkt = nv_slotp->nvslot_spkt;
3146
3147		if (spkt == NULL) {
3148			mutex_exit(&nvp->nvp_mutex);
3149
3150			continue;
3151		}
3152
3153		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3154
3155		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3156
3157		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3158
3159			nv_complete_io(nvp, spkt, 0);
3160		}
3161
3162		mutex_exit(&nvp->nvp_mutex);
3163	}
3164
3165	/*
3166	 * ck804 often doesn't correctly distinguish hot add/remove
3167	 * interrupts.  Frequently both the ADD and the REMOVE bits
3168	 * are asserted, whether it was a remove or add.  Use sstatus
3169	 * to distinguish hot add from hot remove.
3170	 */
3171
3172	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3173		clear_bits = 0;
3174
3175		nvp = &(nvc->nvc_port[port]);
3176		mutex_enter(&nvp->nvp_mutex);
3177
3178		if ((port_mask_pm[port] & intr_status) != 0) {
3179			clear_bits = port_mask_pm[port];
3180			NVLOG(NVDBG_HOT, nvc, nvp,
3181			    "clearing PM interrupt bit: %x",
3182			    intr_status & port_mask_pm[port]);
3183		}
3184
3185		if ((port_mask_hot[port] & intr_status) == 0) {
3186			if (clear_bits != 0) {
3187				goto clear;
3188			} else {
3189				mutex_exit(&nvp->nvp_mutex);
3190				continue;
3191			}
3192		}
3193
3194		/*
3195		 * reaching here means there was a hot add or remove.
3196		 */
3197		clear_bits |= port_mask_hot[port];
3198
3199		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3200
3201		sstatus = nv_get32(bar5_hdl,
3202		    nvc->nvc_port[port].nvp_sstatus);
3203
3204		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3205		    SSTATUS_DET_DEVPRE_PHYCOM) {
3206			nv_link_event(nvp, NV_REM_DEV);
3207		} else {
3208			nv_link_event(nvp, NV_ADD_DEV);
3209		}
3210	clear:
3211		/*
3212		 * clear interrupt bits.  explicit interrupt clear is
3213		 * required for hotplug interrupts.
3214		 */
3215		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3216
3217		/*
3218		 * make sure it's flushed and cleared.  If not try
3219		 * again.  Sometimes it has been observed to not clear
3220		 * on the first try.
3221		 */
3222		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3223
3224		/*
3225		 * make 10 additional attempts to clear the interrupt
3226		 */
3227		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3228			NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3229			    "still not clear try=%d", intr_status,
3230			    ++nvcleared);
3231			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3232			    clear_bits);
3233			intr_status = nv_get8(bar5_hdl,
3234			    nvc->nvc_ck804_int_status);
3235		}
3236
3237		/*
3238		 * if still not clear, log a message and disable the
3239		 * port. highly unlikely that this path is taken, but it
3240		 * gives protection against a wedged interrupt.
3241		 */
3242		if (intr_status & clear_bits) {
3243			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3244			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3245			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3246			nvp->nvp_state |= NV_FAILED;
3247			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3248			    B_TRUE);
3249			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3250			    "interrupt.  disabling port intr_status=%X",
3251			    intr_status);
3252		}
3253
3254		mutex_exit(&nvp->nvp_mutex);
3255	}
3256}
3257
3258
3259/*
3260 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3261 * on the controller, to handle completion and hot plug and remove events.
3262 */
3263static uint_t
3264mcp5x_intr_port(nv_port_t *nvp)
3265{
3266	nv_ctl_t *nvc = nvp->nvp_ctlp;
3267	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3268	uint8_t clear = 0, intr_cycles = 0;
3269	int ret = DDI_INTR_UNCLAIMED;
3270	uint16_t int_status;
3271	clock_t intr_time;
3272	int loop_cnt = 0;
3273
3274	nvp->intr_start_time = ddi_get_lbolt();
3275
3276	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);
3277
3278	do {
3279		/*
3280		 * read current interrupt status
3281		 */
3282		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3283
3284		/*
3285		 * if the port is deactivated, just clear the interrupt and
3286		 * return.  can get here even if interrupts were disabled
3287		 * on this port but enabled on the other.
3288		 */
3289		if (nvp->nvp_state & NV_DEACTIVATED) {
3290			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3291			    int_status);
3292
3293			return (DDI_INTR_CLAIMED);
3294		}
3295
3296		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);
3297
3298		DTRACE_PROBE1(int_status_before_h, int, int_status);
3299
3300		/*
3301		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3302		 * but are masked out from causing an interrupt to be generated
3303		 * to the processor.  Ignore them here by masking them out.
3304		 */
3305		int_status &= ~(MCP5X_INT_IGNORE);
3306
3307		DTRACE_PROBE1(int_status_after_h, int, int_status);
3308
3309		/*
3310		 * exit the loop when no more interrupts to process
3311		 */
3312		if (int_status == 0) {
3313
3314			break;
3315		}
3316
3317		if (int_status & MCP5X_INT_COMPLETE) {
3318			NVLOG(NVDBG_INTR, nvc, nvp,
3319			    "mcp5x_packet_complete_intr", NULL);
3320			/*
3321			 * since int_status was set, return DDI_INTR_CLAIMED
3322			 * from the DDI's perspective even though the packet
3323			 * completion may not have succeeded.  If it fails,
3324			 * need to manually clear the interrupt, otherwise
3325			 * clearing is implicit as a result of reading the
3326			 * task file status register.
3327			 */
3328			ret = DDI_INTR_CLAIMED;
3329			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3330			    NV_FAILURE) {
3331				clear |= MCP5X_INT_COMPLETE;
3332			} else {
3333				intr_cycles = 0;
3334			}
3335		}
3336
3337		if (int_status & MCP5X_INT_DMA_SETUP) {
3338			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
3339			    NULL);
3340
3341			/*
3342			 * Needs to be cleared before starting the BM, so do it
3343			 * now.  make sure this is still working.
3344			 */
3345			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3346			    MCP5X_INT_DMA_SETUP);
3347#ifdef NCQ
3348			ret = mcp5x_dma_setup_intr(nvc, nvp);
3349#endif
3350		}
3351
3352		if (int_status & MCP5X_INT_REM) {
3353			clear |= MCP5X_INT_REM;
3354			ret = DDI_INTR_CLAIMED;
3355
3356			mutex_enter(&nvp->nvp_mutex);
3357			nv_link_event(nvp, NV_REM_DEV);
3358			mutex_exit(&nvp->nvp_mutex);
3359
3360		} else if (int_status & MCP5X_INT_ADD) {
3361			clear |= MCP5X_INT_ADD;
3362			ret = DDI_INTR_CLAIMED;
3363
3364			mutex_enter(&nvp->nvp_mutex);
3365			nv_link_event(nvp, NV_ADD_DEV);
3366			mutex_exit(&nvp->nvp_mutex);
3367		}
3368		if (clear) {
3369			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3370			clear = 0;
3371		}
3372
3373		/*
3374		 * protect against a stuck interrupt
3375		 */
3376		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3377
3378			NVLOG(NVDBG_INTR, nvc, nvp, "excessive interrupt "
3379			    "processing.  Disabling interrupts int_status=%X"
3380			    " clear=%X", int_status, clear);
3381			DTRACE_PROBE(excessive_interrupts_f);
3382
3383			mutex_enter(&nvp->nvp_mutex);
3384			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3385			/*
3386			 * reset the device.  If it remains inaccessible
3387			 * after a reset it will be failed then.
3388			 */
3389			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3390			    B_TRUE);
3391			mutex_exit(&nvp->nvp_mutex);
3392		}
3393
3394	} while (loop_cnt++ < nv_max_intr_loops);
3395
3396	if (loop_cnt > nvp->intr_loop_cnt) {
3397		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
3398		    "Exiting with multiple intr loop count %d", loop_cnt);
3399		nvp->intr_loop_cnt = loop_cnt;
3400	}
3401
3402	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3403	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3404		uint8_t status, bmstatus;
3405		uint16_t int_status2;
3406
3407		if (int_status & MCP5X_INT_COMPLETE) {
3408			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3409			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3410			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3411			    nvp->nvp_mcp5x_int_status);
3412			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3413			    "mcp55_intr_port: Exiting with altstatus %x, "
3414			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3415			    " loop_cnt %d ", status, bmstatus, int_status2,
3416			    int_status, ret, loop_cnt);
3417		}
3418	}
3419
3420	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);
3421
3422	/*
3423	 * To facilitate debugging, keep track of the length of time spent in
3424	 * the port interrupt routine.
3425	 */
3426	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3427	if (intr_time > nvp->intr_duration)
3428		nvp->intr_duration = intr_time;
3429
3430	return (ret);
3431}
3432
3433
3434/* ARGSUSED */
3435static uint_t
3436mcp5x_intr(caddr_t arg1, caddr_t arg2)
3437{
3438	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3439	int ret;
3440
3441	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3442		return (DDI_INTR_UNCLAIMED);
3443
3444	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3445	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3446
3447	return (ret);
3448}
3449
3450
3451#ifdef NCQ
3452/*
3453 * with software driven NCQ on mcp5x, an interrupt occurs right
3454 * before the drive is ready to do a DMA transfer.  At this point,
3455 * the PRD table needs to be programmed and the DMA engine enabled
3456 * and ready to go.
3457 *
3458 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3459 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3460 * -- clear bit 0 of master command reg
3461 * -- program PRD
3462 * -- clear the interrupt status bit for the DMA Setup FIS
3463 * -- set bit 0 of the bus master command register
3464 */
3465static int
3466mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3467{
3468	int slot;
3469	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3470	uint8_t bmicx;
3471	int port = nvp->nvp_port_num;
3472	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3473	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3474
3475	nv_cmn_err(CE_PANIC, nvc, nvp,
3476	    "this is should not be executed at all until NCQ");
3477
3478	mutex_enter(&nvp->nvp_mutex);
3479
3480	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3481
3482	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3483
3484	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3485	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);
3486
3487	/*
3488	 * halt the DMA engine.  This step is necessary according to
3489	 * the mcp5x spec, probably since there may have been a "first" packet
3490	 * that already programmed the DMA engine, but may not turn out to
3491	 * be the first one processed.
3492	 */
3493	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3494
3495	if (bmicx & BMICX_SSBM) {
3496		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3497		    "another packet.  Cancelling and reprogramming", NULL);
3498		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3499	}
3500	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3501
3502	nv_start_dma_engine(nvp, slot);
3503
3504	mutex_exit(&nvp->nvp_mutex);
3505
3506	return (DDI_INTR_CLAIMED);
3507}
3508#endif /* NCQ */
3509
3510
3511/*
3512 * packet completion interrupt.  If the packet is complete, invoke
3513 * the packet completion callback.
3514 */
3515static int
3516mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3517{
3518	uint8_t status, bmstatus;
3519	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3520	int sactive;
3521	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3522	sata_pkt_t *spkt;
3523	nv_slot_t *nv_slotp;
3524
3525	mutex_enter(&nvp->nvp_mutex);
3526
3527	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3528
3529	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3530		DTRACE_PROBE1(bmstatus_h, int, bmstatus);
3531		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set %x",
3532		    bmstatus);
3533		mutex_exit(&nvp->nvp_mutex);
3534
3535		return (NV_FAILURE);
3536	}
3537
3538	/*
3539	 * Commands may have been processed by abort or timeout before
3540	 * interrupt processing acquired the mutex. So we may be processing
3541	 * an interrupt for packets that were already removed.
3542	 * For functioning NCQ processing all slots may be checked, but
3543	 * with NCQ disabled (current code), relying on *_run flags is OK.
3544	 */
3545	if (nvp->nvp_non_ncq_run) {
3546		/*
3547		 * If the just completed item is a non-ncq command, the busy
3548		 * bit should not be set
3549		 */
3550		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3551		if (status & SATA_STATUS_BSY) {
3552			nv_cmn_err(CE_WARN, nvc, nvp,
3553			    "unexpected SATA_STATUS_BSY set");
3554			DTRACE_PROBE(unexpected_status_bsy_p);
3555			mutex_exit(&nvp->nvp_mutex);
3556			/*
3557			 * calling function will clear interrupt.  then
3558			 * the real interrupt will either arrive or the
3559			 * packet timeout handling will take over and
3560			 * reset.
3561			 */
3562			return (NV_FAILURE);
3563		}
3564		ASSERT(nvp->nvp_ncq_run == 0);
3565	} else {
3566		ASSERT(nvp->nvp_non_ncq_run == 0);
3567		/*
3568		 * Pre-NCQ code!
3569		 * Nothing to do. The packet for the command that just
3570		 * completed is already gone. Just clear the interrupt.
3571		 */
3572		(void) nv_bm_status_clear(nvp);
3573		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3574		mutex_exit(&nvp->nvp_mutex);
3575		return (NV_SUCCESS);
3576
3577		/*
3578		 * NCQ check for BSY here and wait if still bsy before
3579		 * continuing. Rather than wait for it to be cleared
3580		 * when starting a packet and wasting CPU time, the starting
3581		 * thread can exit immediate, but might have to spin here
3582		 * for a bit possibly.  Needs more work and experimentation.
3583		 *
3584		 */
3585	}
3586
3587	/*
3588	 * active_pkt_bit will represent the bitmap of the single completed
3589	 * packet.  Because of the nature of sw assisted NCQ, only one
3590	 * command will complete per interrupt.
3591	 */
3592
3593	if (ncq_command == B_FALSE) {
3594		active_pkt = 0;
3595	} else {
3596		/*
3597		 * NCQ: determine which command just completed, by examining
3598		 * which bit cleared in the register since last written.
3599		 */
3600		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3601
3602		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3603
3604		ASSERT(active_pkt_bit);
3605
3606
3607		/*
3608		 * this failure path needs more work to handle the
3609		 * error condition and recovery.
3610		 */
3611		if (active_pkt_bit == 0) {
3612			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3613
3614			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3615			    "nvp->nvp_sactive %X", sactive,
3616			    nvp->nvp_sactive_cache);
3617
3618			(void) nv_get8(cmdhdl, nvp->nvp_status);
3619
3620			mutex_exit(&nvp->nvp_mutex);
3621
3622			return (NV_FAILURE);
3623		}
3624
3625		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3626		    active_pkt++, active_pkt_bit >>= 1) {
3627		}
3628
3629		/*
3630		 * make sure only one bit is ever turned on
3631		 */
3632		ASSERT(active_pkt_bit == 1);
3633
3634		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3635	}
3636
3637	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3638
3639	spkt = nv_slotp->nvslot_spkt;
3640
3641	ASSERT(spkt != NULL);
3642
3643	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3644
3645	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3646
3647	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3648
3649		nv_complete_io(nvp, spkt, active_pkt);
3650	}
3651
3652	mutex_exit(&nvp->nvp_mutex);
3653
3654	return (NV_SUCCESS);
3655}
3656
3657
3658static void
3659nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3660{
3661
3662	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3663
3664	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3665		nvp->nvp_ncq_run--;
3666	} else {
3667		nvp->nvp_non_ncq_run--;
3668	}
3669
3670	/*
3671	 * mark the packet slot idle so it can be reused.  Do this before
3672	 * calling satapkt_comp so the slot can be reused.
3673	 */
3674	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3675
3676	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3677		/*
3678		 * If this is not timed polled mode cmd, which has an
3679		 * active thread monitoring for completion, then need
3680		 * to signal the sleeping thread that the cmd is complete.
3681		 */
3682		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3683			cv_signal(&nvp->nvp_sync_cv);
3684		}
3685
3686		return;
3687	}
3688
3689	if (spkt->satapkt_comp != NULL) {
3690		mutex_exit(&nvp->nvp_mutex);
3691		(*spkt->satapkt_comp)(spkt);
3692		mutex_enter(&nvp->nvp_mutex);
3693	}
3694}
3695
3696
3697/*
3698 * check whether packet is ncq command or not.  for ncq command,
3699 * start it if there is still room on queue.  for non-ncq command only
3700 * start if no other command is running.
3701 */
3702static int
3703nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3704{
3705	uint8_t cmd, ncq;
3706
3707	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3708
3709	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3710
3711	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3712	    (cmd == SATAC_READ_FPDMA_QUEUED));
3713
3714	if (ncq == B_FALSE) {
3715
3716		if ((nvp->nvp_non_ncq_run == 1) ||
3717		    (nvp->nvp_ncq_run > 0)) {
3718			/*
3719			 * next command is non-ncq which can't run
3720			 * concurrently.  exit and return queue full.
3721			 */
3722			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3723
3724			return (SATA_TRAN_QUEUE_FULL);
3725		}
3726
3727		return (nv_start_common(nvp, spkt));
3728	}
3729
3730	/*
3731	 * ncq == B_TRUE
3732	 */
3733	if (nvp->nvp_non_ncq_run == 1) {
3734		/*
3735		 * cannot start any NCQ commands when there
3736		 * is a non-NCQ command running.
3737		 */
3738		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3739
3740		return (SATA_TRAN_QUEUE_FULL);
3741	}
3742
3743#ifdef NCQ
3744	/*
3745	 * this is not compiled for now as satapkt_device.satadev_qdepth
3746	 * is being pulled out until NCQ support is later addressed
3747	 *
3748	 * nvp_queue_depth is initialized by the first NCQ command
3749	 * received.
3750	 */
3751	if (nvp->nvp_queue_depth == 1) {
3752		nvp->nvp_queue_depth =
3753		    spkt->satapkt_device.satadev_qdepth;
3754
3755		ASSERT(nvp->nvp_queue_depth > 1);
3756
3757		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3758		    "nv_process_queue: nvp_queue_depth set to %d",
3759		    nvp->nvp_queue_depth);
3760	}
3761#endif
3762
3763	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3764		/*
3765		 * max number of NCQ commands already active
3766		 */
3767		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3768
3769		return (SATA_TRAN_QUEUE_FULL);
3770	}
3771
3772	return (nv_start_common(nvp, spkt));
3773}
3774
3775
3776/*
3777 * configure INTx and legacy interrupts
3778 */
3779static int
3780nv_add_legacy_intrs(nv_ctl_t *nvc)
3781{
3782	dev_info_t	*devinfo = nvc->nvc_dip;
3783	int		actual, count = 0;
3784	int		x, y, rc, inum = 0;
3785
3786	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3787
3788	/*
3789	 * get number of interrupts
3790	 */
3791	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3792	if ((rc != DDI_SUCCESS) || (count == 0)) {
3793		NVLOG(NVDBG_INIT, nvc, NULL,
3794		    "ddi_intr_get_nintrs() failed, "
3795		    "rc %d count %d", rc, count);
3796
3797		return (DDI_FAILURE);
3798	}
3799
3800	/*
3801	 * allocate an array of interrupt handles
3802	 */
3803	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3804	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3805
3806	/*
3807	 * call ddi_intr_alloc()
3808	 */
3809	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3810	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3811
3812	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3813		nv_cmn_err(CE_WARN, nvc, NULL,
3814		    "ddi_intr_alloc() failed, rc %d", rc);
3815		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3816
3817		return (DDI_FAILURE);
3818	}
3819
3820	if (actual < count) {
3821		nv_cmn_err(CE_WARN, nvc, NULL,
3822		    "ddi_intr_alloc: requested: %d, received: %d",
3823		    count, actual);
3824
3825		goto failure;
3826	}
3827
3828	nvc->nvc_intr_cnt = actual;
3829
3830	/*
3831	 * get intr priority
3832	 */
3833	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3834	    DDI_SUCCESS) {
3835		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3836
3837		goto failure;
3838	}
3839
3840	/*
3841	 * Test for high level mutex
3842	 */
3843	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3844		nv_cmn_err(CE_WARN, nvc, NULL,
3845		    "nv_add_legacy_intrs: high level intr not supported");
3846
3847		goto failure;
3848	}
3849
3850	for (x = 0; x < actual; x++) {
3851		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3852		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3853			nv_cmn_err(CE_WARN, nvc, NULL,
3854			    "ddi_intr_add_handler() failed");
3855
3856			goto failure;
3857		}
3858	}
3859
3860	/*
3861	 * call ddi_intr_enable() for legacy interrupts
3862	 */
3863	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3864		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3865	}
3866
3867	return (DDI_SUCCESS);
3868
3869	failure:
3870	/*
3871	 * free allocated intr and nvc_htable
3872	 */
3873	for (y = 0; y < actual; y++) {
3874		(void) ddi_intr_free(nvc->nvc_htable[y]);
3875	}
3876
3877	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3878
3879	return (DDI_FAILURE);
3880}
3881
3882#ifdef	NV_MSI_SUPPORTED
3883/*
3884 * configure MSI interrupts
3885 */
3886static int
3887nv_add_msi_intrs(nv_ctl_t *nvc)
3888{
3889	dev_info_t	*devinfo = nvc->nvc_dip;
3890	int		count, avail, actual;
3891	int		x, y, rc, inum = 0;
3892
3893	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);
3894
3895	/*
3896	 * get number of interrupts
3897	 */
3898	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3899	if ((rc != DDI_SUCCESS) || (count == 0)) {
3900		nv_cmn_err(CE_WARN, nvc, NULL,
3901		    "ddi_intr_get_nintrs() failed, "
3902		    "rc %d count %d", rc, count);
3903
3904		return (DDI_FAILURE);
3905	}
3906
3907	/*
3908	 * get number of available interrupts
3909	 */
3910	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3911	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3912		nv_cmn_err(CE_WARN, nvc, NULL,
3913		    "ddi_intr_get_navail() failed, "
3914		    "rc %d avail %d", rc, avail);
3915
3916		return (DDI_FAILURE);
3917	}
3918
3919	if (avail < count) {
3920		nv_cmn_err(CE_WARN, nvc, NULL,
3921		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3922		    avail, count);
3923	}
3924
3925	/*
3926	 * allocate an array of interrupt handles
3927	 */
3928	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3929	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3930
3931	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3932	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3933
3934	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3935		nv_cmn_err(CE_WARN, nvc, NULL,
3936		    "ddi_intr_alloc() failed, rc %d", rc);
3937		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3938
3939		return (DDI_FAILURE);
3940	}
3941
3942	/*
3943	 * Use interrupt count returned or abort?
3944	 */
3945	if (actual < count) {
3946		NVLOG(NVDBG_INIT, nvc, NULL,
3947		    "Requested: %d, Received: %d", count, actual);
3948	}
3949
3950	nvc->nvc_intr_cnt = actual;
3951
3952	/*
3953	 * get priority for first msi, assume remaining are all the same
3954	 */
3955	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3956	    DDI_SUCCESS) {
3957		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3958
3959		goto failure;
3960	}
3961
3962	/*
3963	 * test for high level mutex
3964	 */
3965	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3966		nv_cmn_err(CE_WARN, nvc, NULL,
3967		    "nv_add_msi_intrs: high level intr not supported");
3968
3969		goto failure;
3970	}
3971
3972	/*
3973	 * Call ddi_intr_add_handler()
3974	 */
3975	for (x = 0; x < actual; x++) {
3976		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3977		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3978			nv_cmn_err(CE_WARN, nvc, NULL,
3979			    "ddi_intr_add_handler() failed");
3980
3981			goto failure;
3982		}
3983	}
3984
3985	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3986
3987	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3988		(void) ddi_intr_block_enable(nvc->nvc_htable,
3989		    nvc->nvc_intr_cnt);
3990	} else {
3991		/*
3992		 * Call ddi_intr_enable() for MSI non block enable
3993		 */
3994		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3995			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3996		}
3997	}
3998
3999	return (DDI_SUCCESS);
4000
4001	failure:
4002	/*
4003	 * free allocated intr and nvc_htable
4004	 */
4005	for (y = 0; y < actual; y++) {
4006		(void) ddi_intr_free(nvc->nvc_htable[y]);
4007	}
4008
4009	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4010
4011	return (DDI_FAILURE);
4012}
4013#endif
4014
4015
4016static void
4017nv_rem_intrs(nv_ctl_t *nvc)
4018{
4019	int x, i;
4020	nv_port_t *nvp;
4021
4022	NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
4023
4024	/*
4025	 * prevent controller from generating interrupts by
4026	 * masking them out.  This is an extra precaution.
4027	 */
4028	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
4029		nvp = (&nvc->nvc_port[i]);
4030		mutex_enter(&nvp->nvp_mutex);
4031		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
4032		mutex_exit(&nvp->nvp_mutex);
4033	}
4034
4035	/*
4036	 * disable all interrupts
4037	 */
4038	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4039	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4040		(void) ddi_intr_block_disable(nvc->nvc_htable,
4041		    nvc->nvc_intr_cnt);
4042	} else {
4043		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4044			(void) ddi_intr_disable(nvc->nvc_htable[x]);
4045		}
4046	}
4047
4048	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4049		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4050		(void) ddi_intr_free(nvc->nvc_htable[x]);
4051	}
4052
4053	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4054}
4055
4056
4057/*
4058 * variable argument wrapper for cmn_err.  prefixes the instance and port
4059 * number if possible
4060 */
4061static void
4062nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, va_list ap,
4063    boolean_t log_to_sata_ring)
4064{
4065	char port[NV_STR_LEN];
4066	char inst[NV_STR_LEN];
4067	dev_info_t *dip;
4068
4069	if (nvc) {
4070		(void) snprintf(inst, NV_STR_LEN, "inst%d ",
4071		    ddi_get_instance(nvc->nvc_dip));
4072		dip = nvc->nvc_dip;
4073	} else {
4074		inst[0] = '\0';
4075	}
4076
4077	if (nvp) {
4078		(void) snprintf(port, NV_STR_LEN, "port%d",
4079		    nvp->nvp_port_num);
4080		dip = nvp->nvp_ctlp->nvc_dip;
4081	} else {
4082		port[0] = '\0';
4083	}
4084
4085	mutex_enter(&nv_log_mutex);
4086
4087	(void) sprintf(nv_log_buf, "%s%s%s", inst, port,
4088	    (inst[0]|port[0] ? ": " :""));
4089
4090	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4091	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4092
4093	/*
4094	 * Log to console or log to file, depending on
4095	 * nv_log_to_console setting.
4096	 */
4097	if (nv_log_to_console) {
4098		if (nv_prom_print) {
4099			prom_printf("%s\n", nv_log_buf);
4100		} else {
4101			cmn_err(ce, "%s\n", nv_log_buf);
4102		}
4103	} else {
4104		cmn_err(ce, "!%s", nv_log_buf);
4105	}
4106
4107	if (log_to_sata_ring == B_TRUE) {
4108		(void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4109
4110		(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4111		    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4112
4113		sata_trace_debug(dip, nv_log_buf);
4114	}
4115
4116	mutex_exit(&nv_log_mutex);
4117}
4118
4119
4120/*
4121 * wrapper for cmn_err
4122 */
4123static void
4124nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4125{
4126	va_list ap;
4127
4128	va_start(ap, fmt);
4129	nv_vcmn_err(ce, nvc, nvp, fmt, ap, B_TRUE);
4130	va_end(ap);
4131}
4132
4133
4134static void
4135nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
4136{
4137	va_list ap;
4138
4139	if (nv_log_to_cmn_err == B_TRUE) {
4140		va_start(ap, fmt);
4141		nv_vcmn_err(CE_CONT, nvc, nvp, fmt, ap, B_FALSE);
4142		va_end(ap);
4143
4144	}
4145
4146	va_start(ap, fmt);
4147
4148	if (nvp == NULL && nvc == NULL) {
4149		sata_vtrace_debug(NULL, fmt, ap);
4150		va_end(ap);
4151
4152		return;
4153	}
4154
4155	if (nvp == NULL && nvc != NULL) {
4156		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
4157		va_end(ap);
4158
4159		return;
4160	}
4161
4162	/*
4163	 * nvp is not NULL, but nvc might be.  Reference nvp for both
4164	 * port and dip, to get the port number prefixed on the
4165	 * message.
4166	 */
4167	mutex_enter(&nv_log_mutex);
4168
4169	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
4170	    nvp->nvp_port_num, fmt);
4171
4172	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);
4173
4174	mutex_exit(&nv_log_mutex);
4175
4176	va_end(ap);
4177}
4178
4179
4180/*
4181 * program registers which are common to all commands
4182 */
4183static void
4184nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4185{
4186	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4187	sata_pkt_t *spkt;
4188	sata_cmd_t *satacmd;
4189	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4190	uint8_t cmd, ncq = B_FALSE;
4191
4192	spkt = nv_slotp->nvslot_spkt;
4193	satacmd = &spkt->satapkt_cmd;
4194	cmd = satacmd->satacmd_cmd_reg;
4195
4196	ASSERT(nvp->nvp_slot);
4197
4198	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4199	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4200		ncq = B_TRUE;
4201	}
4202
4203	/*
4204	 * select the drive
4205	 */
4206	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4207
4208	/*
4209	 * make certain the drive selected
4210	 */
4211	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4212	    NV_SEC2USEC(5), 0) == B_FALSE) {
4213
4214		return;
4215	}
4216
4217	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4218
4219	case ATA_ADDR_LBA:
4220		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4221		    NULL);
4222
4223		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4224		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4225		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4226		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4227		nv_put8(cmdhdl, nvp->nvp_feature,
4228		    satacmd->satacmd_features_reg);
4229
4230
4231		break;
4232
4233	case ATA_ADDR_LBA28:
4234		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4235		    "ATA_ADDR_LBA28 mode", NULL);
4236		/*
4237		 * NCQ only uses 48-bit addressing
4238		 */
4239		ASSERT(ncq != B_TRUE);
4240
4241		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4242		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4243		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4244		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4245		nv_put8(cmdhdl, nvp->nvp_feature,
4246		    satacmd->satacmd_features_reg);
4247
4248		break;
4249
4250	case ATA_ADDR_LBA48:
4251		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4252		    "ATA_ADDR_LBA48 mode", NULL);
4253
4254		/*
4255		 * for NCQ, tag goes into count register and real sector count
4256		 * into features register.  The sata module does the translation
4257		 * in the satacmd.
4258		 */
4259		if (ncq == B_TRUE) {
4260			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4261		} else {
4262			nv_put8(cmdhdl, nvp->nvp_count,
4263			    satacmd->satacmd_sec_count_msb);
4264			nv_put8(cmdhdl, nvp->nvp_count,
4265			    satacmd->satacmd_sec_count_lsb);
4266		}
4267
4268		nv_put8(cmdhdl, nvp->nvp_feature,
4269		    satacmd->satacmd_features_reg_ext);
4270		nv_put8(cmdhdl, nvp->nvp_feature,
4271		    satacmd->satacmd_features_reg);
4272
4273		/*
4274		 * send the high-order half first
4275		 */
4276		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4277		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4278		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4279
4280		/*
4281		 * Send the low-order half
4282		 */
4283		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4284		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4285		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4286
4287		break;
4288
4289	case 0:
4290		/*
4291		 * non-media access commands such as identify and features
4292		 * take this path.
4293		 */
4294		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4295		nv_put8(cmdhdl, nvp->nvp_feature,
4296		    satacmd->satacmd_features_reg);
4297		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4298		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4299		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4300
4301		break;
4302
4303	default:
4304		break;
4305	}
4306
4307	ASSERT(nvp->nvp_slot);
4308}
4309
4310
4311/*
4312 * start a command that involves no media access
4313 */
4314static int
4315nv_start_nodata(nv_port_t *nvp, int slot)
4316{
4317	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4318	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4319	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4320	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4321
4322	nv_program_taskfile_regs(nvp, slot);
4323
4324	/*
4325	 * This next one sets the controller in motion
4326	 */
4327	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4328
4329	return (SATA_TRAN_ACCEPTED);
4330}
4331
4332
4333static int
4334nv_bm_status_clear(nv_port_t *nvp)
4335{
4336	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4337	uchar_t	status, ret;
4338
4339	/*
4340	 * Get the current BM status
4341	 */
4342	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4343
4344	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4345
4346	/*
4347	 * Clear the latches (and preserve the other bits)
4348	 */
4349	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4350
4351	return (ret);
4352}
4353
4354
4355/*
4356 * program the bus master DMA engine with the PRD address for
4357 * the active slot command, and start the DMA engine.
4358 */
4359static void
4360nv_start_dma_engine(nv_port_t *nvp, int slot)
4361{
4362	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4363	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4364	uchar_t direction;
4365
4366	ASSERT(nv_slotp->nvslot_spkt != NULL);
4367
4368	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4369	    == SATA_DIR_READ) {
4370		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4371	} else {
4372		direction = BMICX_RWCON_READ_FROM_MEMORY;
4373	}
4374
4375	NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4376	    "nv_start_dma_engine entered", NULL);
4377
4378#if NOT_USED
4379	/*
4380	 * NOT NEEDED. Left here of historical reason.
4381	 * Reset the controller's interrupt and error status bits.
4382	 */
4383	(void) nv_bm_status_clear(nvp);
4384#endif
4385	/*
4386	 * program the PRD table physical start address
4387	 */
4388	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4389
4390	/*
4391	 * set the direction control and start the DMA controller
4392	 */
4393	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4394}
4395
4396/*
4397 * start dma command, either in or out
4398 */
4399static int
4400nv_start_dma(nv_port_t *nvp, int slot)
4401{
4402	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4403	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4404	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4405	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4406	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4407#ifdef NCQ
4408	uint8_t ncq = B_FALSE;
4409#endif
4410	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4411	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4412	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4413	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4414
4415	ASSERT(sg_count != 0);
4416
4417	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4418		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4419		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4420		    sata_cmdp->satacmd_num_dma_cookies);
4421
4422		return (NV_FAILURE);
4423	}
4424
4425	nv_program_taskfile_regs(nvp, slot);
4426
4427	/*
4428	 * start the drive in motion
4429	 */
4430	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4431
4432	/*
4433	 * the drive starts processing the transaction when the cmd register
4434	 * is written.  This is done here before programming the DMA engine to
4435	 * parallelize and save some time.  In the event that the drive is ready
4436	 * before DMA, it will wait.
4437	 */
4438#ifdef NCQ
4439	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4440	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4441		ncq = B_TRUE;
4442	}
4443#endif
4444
4445	/*
4446	 * copy the PRD list to PRD table in DMA accessible memory
4447	 * so that the controller can access it.
4448	 */
4449	for (idx = 0; idx < sg_count; idx++, srcp++) {
4450		uint32_t size;
4451
4452		nv_put32(sghdl, dstp++, srcp->dmac_address);
4453
4454		/* Set the number of bytes to transfer, 0 implies 64KB */
4455		size = srcp->dmac_size;
4456		if (size == 0x10000)
4457			size = 0;
4458
4459		/*
4460		 * If this is a 40-bit address, copy bits 32-40 of the
4461		 * physical address to bits 16-24 of the PRD count.
4462		 */
4463		if (srcp->dmac_laddress > UINT32_MAX) {
4464			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4465		}
4466
4467		/*
4468		 * set the end of table flag for the last entry
4469		 */
4470		if (idx == (sg_count - 1)) {
4471			size |= PRDE_EOT;
4472		}
4473
4474		nv_put32(sghdl, dstp++, size);
4475	}
4476
4477	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4478	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4479
4480	nv_start_dma_engine(nvp, slot);
4481
4482#ifdef NCQ
4483	/*
4484	 * optimization:  for SWNCQ, start DMA engine if this is the only
4485	 * command running.  Preliminary NCQ efforts indicated this needs
4486	 * more debugging.
4487	 *
4488	 * if (nvp->nvp_ncq_run <= 1)
4489	 */
4490
4491	if (ncq == B_FALSE) {
4492		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4493		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4494		    " cmd = %X", non_ncq_commands++, cmd);
4495		nv_start_dma_engine(nvp, slot);
4496	} else {
4497		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4498		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4499	}
4500#endif /* NCQ */
4501
4502	return (SATA_TRAN_ACCEPTED);
4503}
4504
4505
4506/*
4507 * start a PIO data-in ATA command
4508 */
4509static int
4510nv_start_pio_in(nv_port_t *nvp, int slot)
4511{
4512
4513	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4514	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4515	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4516
4517	nv_program_taskfile_regs(nvp, slot);
4518
4519	/*
4520	 * This next one sets the drive in motion
4521	 */
4522	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4523
4524	return (SATA_TRAN_ACCEPTED);
4525}
4526
4527
4528/*
4529 * start a PIO data-out ATA command
4530 */
4531static int
4532nv_start_pio_out(nv_port_t *nvp, int slot)
4533{
4534	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4535	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4536	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4537
4538	nv_program_taskfile_regs(nvp, slot);
4539
4540	/*
4541	 * this next one sets the drive in motion
4542	 */
4543	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4544
4545	/*
4546	 * wait for the busy bit to settle
4547	 */
4548	NV_DELAY_NSEC(400);
4549
4550	/*
4551	 * wait for the drive to assert DRQ to send the first chunk
4552	 * of data. Have to busy wait because there's no interrupt for
4553	 * the first chunk. This is bad... uses a lot of cycles if the
4554	 * drive responds too slowly or if the wait loop granularity
4555	 * is too large. It's even worse if the drive is defective and
4556	 * the loop times out.
4557	 */
4558	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4559	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4560	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4561	    4000000, 0) == B_FALSE) {
4562		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4563
4564		goto error;
4565	}
4566
4567	/*
4568	 * send the first block.
4569	 */
4570	nv_intr_pio_out(nvp, nv_slotp);
4571
4572	/*
4573	 * If nvslot_flags is not set to COMPLETE yet, then processing
4574	 * is OK so far, so return.  Otherwise, fall into error handling
4575	 * below.
4576	 */
4577	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4578
4579		return (SATA_TRAN_ACCEPTED);
4580	}
4581
4582	error:
4583	/*
4584	 * there was an error so reset the device and complete the packet.
4585	 */
4586	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4587	nv_complete_io(nvp, spkt, 0);
4588	nv_reset(nvp, "pio_out");
4589
4590	return (SATA_TRAN_PORT_ERROR);
4591}
4592
4593
4594/*
4595 * start a ATAPI Packet command (PIO data in or out)
4596 */
4597static int
4598nv_start_pkt_pio(nv_port_t *nvp, int slot)
4599{
4600	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4601	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4602	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4603	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4604
4605	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4606	    "nv_start_pkt_pio: start", NULL);
4607
4608	/*
4609	 * Write the PACKET command to the command register.  Normally
4610	 * this would be done through nv_program_taskfile_regs().  It
4611	 * is done here because some values need to be overridden.
4612	 */
4613
4614	/* select the drive */
4615	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4616
4617	/* make certain the drive selected */
4618	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4619	    NV_SEC2USEC(5), 0) == B_FALSE) {
4620		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4621		    "nv_start_pkt_pio: drive select failed", NULL);
4622		return (SATA_TRAN_PORT_ERROR);
4623	}
4624
4625	/*
4626	 * The command is always sent via PIO, despite whatever the SATA
4627	 * common module sets in the command.  Overwrite the DMA bit to do this.
4628	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4629	 */
4630	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4631
4632	/* set appropriately by the sata common module */
4633	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4634	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4635	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4636	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4637
4638	/* initiate the command by writing the command register last */
4639	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4640
4641	/* Give the host controller time to do its thing */
4642	NV_DELAY_NSEC(400);
4643
4644	/*
4645	 * Wait for the device to indicate that it is ready for the command
4646	 * ATAPI protocol state - HP0: Check_Status_A
4647	 */
4648
4649	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4650	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4651	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4652	    4000000, 0) == B_FALSE) {
4653		/*
4654		 * Either an error or device fault occurred or the wait
4655		 * timed out.  According to the ATAPI protocol, command
4656		 * completion is also possible.  Other implementations of
4657		 * this protocol don't handle this last case, so neither
4658		 * does this code.
4659		 */
4660
4661		if (nv_get8(cmdhdl, nvp->nvp_status) &
4662		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4663			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4664
4665			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4666			    "nv_start_pkt_pio: device error (HP0)", NULL);
4667		} else {
4668			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4669
4670			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4671			    "nv_start_pkt_pio: timeout (HP0)", NULL);
4672		}
4673
4674		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4675		nv_complete_io(nvp, spkt, 0);
4676		nv_reset(nvp, "start_pkt_pio");
4677
4678		return (SATA_TRAN_PORT_ERROR);
4679	}
4680
4681	/*
4682	 * Put the ATAPI command in the data register
4683	 * ATAPI protocol state - HP1: Send_Packet
4684	 */
4685
4686	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4687	    (ushort_t *)nvp->nvp_data,
4688	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4689
4690	/*
4691	 * See you in nv_intr_pkt_pio.
4692	 * ATAPI protocol state - HP3: INTRQ_wait
4693	 */
4694
4695	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4696	    "nv_start_pkt_pio: exiting into HP3", NULL);
4697
4698	return (SATA_TRAN_ACCEPTED);
4699}
4700
4701
4702/*
4703 * Interrupt processing for a non-data ATA command.
4704 */
4705static void
4706nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4707{
4708	uchar_t status;
4709	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4710	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4711	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4712	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4713
4714	NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4715
4716	status = nv_get8(cmdhdl, nvp->nvp_status);
4717
4718	/*
4719	 * check for errors
4720	 */
4721	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4722		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4723		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4724		    nvp->nvp_altstatus);
4725		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4726	} else {
4727		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4728	}
4729
4730	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4731}
4732
4733
4734/*
4735 * ATA command, PIO data in
4736 */
4737static void
4738nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4739{
4740	uchar_t	status;
4741	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4742	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4743	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4744	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4745	int count;
4746
4747	status = nv_get8(cmdhdl, nvp->nvp_status);
4748
4749	if (status & SATA_STATUS_BSY) {
4750		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4751		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4752		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4753		    nvp->nvp_altstatus);
4754		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4755		nv_reset(nvp, "intr_pio_in");
4756
4757		return;
4758	}
4759
4760	/*
4761	 * check for errors
4762	 */
4763	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4764	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4765		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4766		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4767		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4768
4769		return;
4770	}
4771
4772	/*
4773	 * read the next chunk of data (if any)
4774	 */
4775	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4776
4777	/*
4778	 * read count bytes
4779	 */
4780	ASSERT(count != 0);
4781
4782	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4783	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4784
4785	nv_slotp->nvslot_v_addr += count;
4786	nv_slotp->nvslot_byte_count -= count;
4787
4788
4789	if (nv_slotp->nvslot_byte_count != 0) {
4790		/*
4791		 * more to transfer.  Wait for next interrupt.
4792		 */
4793		return;
4794	}
4795
4796	/*
4797	 * transfer is complete. wait for the busy bit to settle.
4798	 */
4799	NV_DELAY_NSEC(400);
4800
4801	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4802	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4803}
4804
4805
4806/*
4807 * ATA command PIO data out
4808 */
4809static void
4810nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4811{
4812	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4813	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4814	uchar_t status;
4815	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4816	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4817	int count;
4818
4819	/*
4820	 * clear the IRQ
4821	 */
4822	status = nv_get8(cmdhdl, nvp->nvp_status);
4823
4824	if (status & SATA_STATUS_BSY) {
4825		/*
4826		 * this should not happen
4827		 */
4828		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4829		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4830		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4831		    nvp->nvp_altstatus);
4832		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4833
4834		return;
4835	}
4836
4837	/*
4838	 * check for errors
4839	 */
4840	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4841		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4842		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4843		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4844
4845		return;
4846	}
4847
4848	/*
4849	 * this is the condition which signals the drive is
4850	 * no longer ready to transfer.  Likely that the transfer
4851	 * completed successfully, but check that byte_count is
4852	 * zero.
4853	 */
4854	if ((status & SATA_STATUS_DRQ) == 0) {
4855
4856		if (nv_slotp->nvslot_byte_count == 0) {
4857			/*
4858			 * complete; successful transfer
4859			 */
4860			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4861		} else {
4862			/*
4863			 * error condition, incomplete transfer
4864			 */
4865			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4866			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4867		}
4868		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4869
4870		return;
4871	}
4872
4873	/*
4874	 * write the next chunk of data
4875	 */
4876	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4877
4878	/*
4879	 * read or write count bytes
4880	 */
4881
4882	ASSERT(count != 0);
4883
4884	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4885	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4886
4887	nv_slotp->nvslot_v_addr += count;
4888	nv_slotp->nvslot_byte_count -= count;
4889}
4890
4891
4892/*
4893 * ATAPI PACKET command, PIO in/out interrupt
4894 *
4895 * Under normal circumstances, one of four different interrupt scenarios
4896 * will result in this function being called:
4897 *
4898 * 1. Packet command data transfer
4899 * 2. Packet command completion
4900 * 3. Request sense data transfer
4901 * 4. Request sense command completion
4902 */
4903static void
4904nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4905{
4906	uchar_t	status;
4907	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4908	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4909	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4910	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4911	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4912	uint16_t ctlr_count;
4913	int count;
4914
4915	/* ATAPI protocol state - HP2: Check_Status_B */
4916
4917	status = nv_get8(cmdhdl, nvp->nvp_status);
4918	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4919	    "nv_intr_pkt_pio: status 0x%x", status);
4920
4921	if (status & SATA_STATUS_BSY) {
4922		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4923			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4924			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4925		} else {
4926			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4927			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4928			nv_reset(nvp, "intr_pkt_pio");
4929		}
4930
4931		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4932		    "nv_intr_pkt_pio: busy - status 0x%x", status);
4933
4934		return;
4935	}
4936
4937	if ((status & SATA_STATUS_DF) != 0) {
4938		/*
4939		 * On device fault, just clean up and bail.  Request sense
4940		 * will just default to its NO SENSE initialized value.
4941		 */
4942
4943		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4944			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4945		}
4946
4947		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4948		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4949
4950		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4951		    nvp->nvp_altstatus);
4952		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4953		    nvp->nvp_error);
4954
4955		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4956		    "nv_intr_pkt_pio: device fault", NULL);
4957
4958		return;
4959	}
4960
4961	if ((status & SATA_STATUS_ERR) != 0) {
4962		/*
4963		 * On command error, figure out whether we are processing a
4964		 * request sense.  If so, clean up and bail.  Otherwise,
4965		 * do a REQUEST SENSE.
4966		 */
4967
4968		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4969			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4970			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4971			    NV_FAILURE) {
4972				nv_copy_registers(nvp, &spkt->satapkt_device,
4973				    spkt);
4974				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4975				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4976			}
4977
4978			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4979			    nvp->nvp_altstatus);
4980			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4981			    nvp->nvp_error);
4982		} else {
4983			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4984			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4985
4986			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4987		}
4988
4989		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4990		    "nv_intr_pkt_pio: error (status 0x%x)", status);
4991
4992		return;
4993	}
4994
4995	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4996		/*
4997		 * REQUEST SENSE command processing
4998		 */
4999
5000		if ((status & (SATA_STATUS_DRQ)) != 0) {
5001			/* ATAPI state - HP4: Transfer_Data */
5002
5003			/* read the byte count from the controller */
5004			ctlr_count =
5005			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5006			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5007
5008			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5009			    "nv_intr_pkt_pio: ctlr byte count - %d",
5010			    ctlr_count);
5011
5012			if (ctlr_count == 0) {
5013				/* no data to transfer - some devices do this */
5014
5015				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5016				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5017
5018				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5019				    "nv_intr_pkt_pio: done (no data)", NULL);
5020
5021				return;
5022			}
5023
5024			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
5025
5026			/* transfer the data */
5027			ddi_rep_get16(cmdhdl,
5028			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
5029			    (ushort_t *)nvp->nvp_data, (count >> 1),
5030			    DDI_DEV_NO_AUTOINCR);
5031
5032			/* consume residual bytes */
5033			ctlr_count -= count;
5034
5035			if (ctlr_count > 0) {
5036				for (; ctlr_count > 0; ctlr_count -= 2)
5037					(void) ddi_get16(cmdhdl,
5038					    (ushort_t *)nvp->nvp_data);
5039			}
5040
5041			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5042			    "nv_intr_pkt_pio: transition to HP2", NULL);
5043		} else {
5044			/* still in ATAPI state - HP2 */
5045
5046			/*
5047			 * In order to avoid clobbering the rqsense data
5048			 * set by the SATA common module, the sense data read
5049			 * from the device is put in a separate buffer and
5050			 * copied into the packet after the request sense
5051			 * command successfully completes.
5052			 */
5053			bcopy(nv_slotp->nvslot_rqsense_buff,
5054			    spkt->satapkt_cmd.satacmd_rqsense,
5055			    SATA_ATAPI_RQSENSE_LEN);
5056
5057			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5058			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5059
5060			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5061			    "nv_intr_pkt_pio: request sense done", NULL);
5062		}
5063
5064		return;
5065	}
5066
5067	/*
5068	 * Normal command processing
5069	 */
5070
5071	if ((status & (SATA_STATUS_DRQ)) != 0) {
5072		/* ATAPI protocol state - HP4: Transfer_Data */
5073
5074		/* read the byte count from the controller */
5075		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5076		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5077
5078		if (ctlr_count == 0) {
5079			/* no data to transfer - some devices do this */
5080
5081			spkt->satapkt_reason = SATA_PKT_COMPLETED;
5082			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5083
5084			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5085			    "nv_intr_pkt_pio: done (no data)", NULL);
5086
5087			return;
5088		}
5089
5090		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
5091
5092		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5093		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);
5094
5095		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5096		    "nv_intr_pkt_pio: byte_count 0x%x",
5097		    nv_slotp->nvslot_byte_count);
5098
5099		/* transfer the data */
5100
5101		if (direction == SATA_DIR_READ) {
5102			ddi_rep_get16(cmdhdl,
5103			    (ushort_t *)nv_slotp->nvslot_v_addr,
5104			    (ushort_t *)nvp->nvp_data, (count >> 1),
5105			    DDI_DEV_NO_AUTOINCR);
5106
5107			ctlr_count -= count;
5108
5109			if (ctlr_count > 0) {
5110				/* consume remaining bytes */
5111
5112				for (; ctlr_count > 0;
5113				    ctlr_count -= 2)
5114					(void) ddi_get16(cmdhdl,
5115					    (ushort_t *)nvp->nvp_data);
5116
5117				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5118				    "nv_intr_pkt_pio: bytes remained", NULL);
5119			}
5120		} else {
5121			ddi_rep_put16(cmdhdl,
5122			    (ushort_t *)nv_slotp->nvslot_v_addr,
5123			    (ushort_t *)nvp->nvp_data, (count >> 1),
5124			    DDI_DEV_NO_AUTOINCR);
5125		}
5126
5127		nv_slotp->nvslot_v_addr += count;
5128		nv_slotp->nvslot_byte_count -= count;
5129
5130		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5131		    "nv_intr_pkt_pio: transition to HP2", NULL);
5132	} else {
5133		/* still in ATAPI state - HP2 */
5134
5135		spkt->satapkt_reason = SATA_PKT_COMPLETED;
5136		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5137
5138		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5139		    "nv_intr_pkt_pio: done", NULL);
5140	}
5141}
5142
5143
5144/*
5145 * ATA command, DMA data in/out
5146 */
5147static void
5148nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5149{
5150	uchar_t status;
5151	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5152	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5153	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5154	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5155	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5156	uchar_t	bmicx;
5157	uchar_t bm_status;
5158
5159	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5160
5161	/*
5162	 * stop DMA engine.
5163	 */
5164	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5165	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5166
5167	/*
5168	 * get the status and clear the IRQ, and check for DMA error
5169	 */
5170	status = nv_get8(cmdhdl, nvp->nvp_status);
5171
5172	/*
5173	 * check for drive errors
5174	 */
5175	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5176		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5177		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5178		(void) nv_bm_status_clear(nvp);
5179
5180		return;
5181	}
5182
5183	bm_status = nv_bm_status_clear(nvp);
5184
5185	/*
5186	 * check for bus master errors
5187	 */
5188
5189	if (bm_status & BMISX_IDERR) {
5190		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
5191		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5192		    nvp->nvp_altstatus);
5193		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5194		nv_reset(nvp, "intr_dma");
5195
5196		return;
5197	}
5198
5199	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5200}
5201
5202
5203/*
5204 * Wait for a register of a controller to achieve a specific state.
5205 * To return normally, all the bits in the first sub-mask must be ON,
5206 * all the bits in the second sub-mask must be OFF.
5207 * If timeout_usec microseconds pass without the controller achieving
5208 * the desired bit configuration, return TRUE, else FALSE.
5209 *
5210 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5211 * occur for the first 250 us, then switch over to a sleeping wait.
5212 *
5213 */
5214int
5215nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5216    int type_wait)
5217{
5218	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5219	hrtime_t end, cur, start_sleep, start;
5220	int first_time = B_TRUE;
5221	ushort_t val;
5222
5223	for (;;) {
5224		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5225
5226		if ((val & onbits) == onbits && (val & offbits) == 0) {
5227
5228			return (B_TRUE);
5229		}
5230
5231		cur = gethrtime();
5232
5233		/*
5234		 * store the start time and calculate the end
5235		 * time.  also calculate "start_sleep" which is
5236		 * the point after which the driver will stop busy
5237		 * waiting and change to sleep waiting.
5238		 */
5239		if (first_time) {
5240			first_time = B_FALSE;
5241			/*
5242			 * start and end are in nanoseconds
5243			 */
5244			start = cur;
5245			end = start + timeout_usec * 1000;
5246			/*
5247			 * add 1 ms to start
5248			 */
5249			start_sleep =  start + 250000;
5250
5251			if (servicing_interrupt()) {
5252				type_wait = NV_NOSLEEP;
5253			}
5254		}
5255
5256		if (cur > end) {
5257
5258			break;
5259		}
5260
5261		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5262#if ! defined(__lock_lint)
5263			delay(1);
5264#endif
5265		} else {
5266			drv_usecwait(nv_usec_delay);
5267		}
5268	}
5269
5270	return (B_FALSE);
5271}
5272
5273
5274/*
5275 * This is a slightly more complicated version that checks
5276 * for error conditions and bails-out rather than looping
5277 * until the timeout is exceeded.
5278 *
5279 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5280 * occur for the first 250 us, then switch over to a sleeping wait.
5281 */
5282int
5283nv_wait3(
5284	nv_port_t	*nvp,
5285	uchar_t		onbits1,
5286	uchar_t		offbits1,
5287	uchar_t		failure_onbits2,
5288	uchar_t		failure_offbits2,
5289	uchar_t		failure_onbits3,
5290	uchar_t		failure_offbits3,
5291	uint_t		timeout_usec,
5292	int		type_wait)
5293{
5294	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5295	hrtime_t end, cur, start_sleep, start;
5296	int first_time = B_TRUE;
5297	ushort_t val;
5298
5299	for (;;) {
5300		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5301
5302		/*
5303		 * check for expected condition
5304		 */
5305		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5306
5307			return (B_TRUE);
5308		}
5309
5310		/*
5311		 * check for error conditions
5312		 */
5313		if ((val & failure_onbits2) == failure_onbits2 &&
5314		    (val & failure_offbits2) == 0) {
5315
5316			return (B_FALSE);
5317		}
5318
5319		if ((val & failure_onbits3) == failure_onbits3 &&
5320		    (val & failure_offbits3) == 0) {
5321
5322			return (B_FALSE);
5323		}
5324
5325		/*
5326		 * store the start time and calculate the end
5327		 * time.  also calculate "start_sleep" which is
5328		 * the point after which the driver will stop busy
5329		 * waiting and change to sleep waiting.
5330		 */
5331		if (first_time) {
5332			first_time = B_FALSE;
5333			/*
5334			 * start and end are in nanoseconds
5335			 */
5336			cur = start = gethrtime();
5337			end = start + timeout_usec * 1000;
5338			/*
5339			 * add 1 ms to start
5340			 */
5341			start_sleep =  start + 250000;
5342
5343			if (servicing_interrupt()) {
5344				type_wait = NV_NOSLEEP;
5345			}
5346		} else {
5347			cur = gethrtime();
5348		}
5349
5350		if (cur > end) {
5351
5352			break;
5353		}
5354
5355		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5356#if ! defined(__lock_lint)
5357			delay(1);
5358#endif
5359		} else {
5360			drv_usecwait(nv_usec_delay);
5361		}
5362	}
5363
5364	return (B_FALSE);
5365}
5366
5367
5368/*
5369 * nv_port_state_change() reports the state of the port to the
5370 * sata module by calling sata_hba_event_notify().  This
5371 * function is called any time the state of the port is changed
5372 */
5373static void
5374nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5375{
5376	sata_device_t sd;
5377
5378	NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5379	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5380	    "lbolt %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5381
5382	if (ddi_in_panic() != 0) {
5383
5384		return;
5385	}
5386
5387	bzero((void *)&sd, sizeof (sata_device_t));
5388	sd.satadev_rev = SATA_DEVICE_REV;
5389	nv_copy_registers(nvp, &sd, NULL);
5390
5391	/*
5392	 * When NCQ is implemented sactive and snotific field need to be
5393	 * updated.
5394	 */
5395	sd.satadev_addr.cport = nvp->nvp_port_num;
5396	sd.satadev_addr.qual = addr_type;
5397	sd.satadev_state = state;
5398
5399	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5400}
5401
5402
5403/*
5404 * Monitor reset progress and signature gathering.
5405 */
5406static clock_t
5407nv_monitor_reset(nv_port_t *nvp)
5408{
5409	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5410	uint32_t sstatus;
5411
5412	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
5413
5414	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5415
5416	/*
5417	 * Check the link status. The link needs to be active before
5418	 * checking the link's status.
5419	 */
5420	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5421	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5422		/*
5423		 * Either link is not active or there is no device
5424		 * If the link remains down for more than NV_LINK_EVENT_DOWN
5425		 * (milliseconds), abort signature acquisition and complete
5426		 * reset processing.  The link will go down when COMRESET is
5427		 * sent by nv_reset().
5428		 */
5429
5430		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5431		    NV_LINK_EVENT_DOWN) {
5432
5433			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5434			    "nv_monitor_reset: no link - ending signature "
5435			    "acquisition; time after reset %ldms",
5436			    TICK_TO_MSEC(ddi_get_lbolt() -
5437			    nvp->nvp_reset_time));
5438
5439			DTRACE_PROBE(no_link_reset_giving_up_f);
5440
5441			/*
5442			 * If the drive was previously present and configured
5443			 * and then subsequently removed, then send a removal
5444			 * event to sata common module.
5445			 */
5446			if (nvp->nvp_type != SATA_DTYPE_NONE) {
5447				nv_port_state_change(nvp,
5448				    SATA_EVNT_DEVICE_DETACHED,
5449				    SATA_ADDR_CPORT, 0);
5450			}
5451
5452			nvp->nvp_type = SATA_DTYPE_NONE;
5453			nvp->nvp_signature = NV_NO_SIG;
5454			nvp->nvp_state &= ~(NV_DEACTIVATED);
5455
5456#ifdef SGPIO_SUPPORT
5457			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5458			    SGP_CTLR_PORT_TO_DRV(
5459			    nvp->nvp_ctlp->nvc_ctlr_num,
5460			    nvp->nvp_port_num));
5461#endif
5462
5463			cv_signal(&nvp->nvp_reset_cv);
5464
5465			return (0);
5466		}
5467
5468		DTRACE_PROBE(link_lost_reset_keep_trying_p);
5469
5470		return (nvp->nvp_wait_sig);
5471	}
5472
5473	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5474	    "nv_monitor_reset: link up.  time since reset %ldms",
5475	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));
5476
5477	nv_read_signature(nvp);
5478
5479
5480	if (nvp->nvp_signature != NV_NO_SIG) {
5481		/*
5482		 * signature has been acquired, send the appropriate
5483		 * event to the sata common module.
5484		 */
5485		if (nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) {
5486			char *source;
5487
5488			if (nvp->nvp_state & NV_HOTPLUG) {
5489
5490				source = "hotplugged";
5491				nv_port_state_change(nvp,
5492				    SATA_EVNT_DEVICE_ATTACHED,
5493				    SATA_ADDR_CPORT, SATA_DSTATE_PWR_ACTIVE);
5494				DTRACE_PROBE1(got_sig_for_hotplugged_device_h,
5495				    int, nvp->nvp_state);
5496
5497			} else {
5498				source = "activated or attached";
5499				DTRACE_PROBE1(got_sig_for_existing_device_h,
5500				    int, nvp->nvp_state);
5501			}
5502
5503			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5504			    "signature acquired for %s device. sig:"
5505			    " 0x%x state: 0x%x nvp_type: 0x%x", source,
5506			    nvp->nvp_signature, nvp->nvp_state, nvp->nvp_type);
5507
5508
5509			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);
5510
5511#ifdef SGPIO_SUPPORT
5512			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5513				nv_sgp_drive_connect(nvp->nvp_ctlp,
5514				    SGP_CTLR_PORT_TO_DRV(
5515				    nvp->nvp_ctlp->nvc_ctlr_num,
5516				    nvp->nvp_port_num));
5517			} else {
5518				nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5519				    SGP_CTLR_PORT_TO_DRV(
5520				    nvp->nvp_ctlp->nvc_ctlr_num,
5521				    nvp->nvp_port_num));
5522			}
5523#endif
5524
5525			cv_signal(&nvp->nvp_reset_cv);
5526
5527			return (0);
5528		}
5529
5530		/*
5531		 * Since this was not an attach, it was a reset of an
5532		 * existing device
5533		 */
5534		nvp->nvp_state &= ~NV_RESET;
5535		nvp->nvp_state |= NV_RESTORE;
5536
5537
5538
5539		DTRACE_PROBE(got_signature_reset_complete_p);
5540		DTRACE_PROBE1(nvp_signature_h, int, nvp->nvp_signature);
5541		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
5542
5543		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5544		    "signature acquired reset complete. sig: 0x%x"
5545		    " state: 0x%x", nvp->nvp_signature, nvp->nvp_state);
5546
5547		/*
5548		 * interrupts may have been disabled so just make sure
5549		 * they are cleared and re-enabled.
5550		 */
5551
5552		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5553		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5554
5555		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5556		    SATA_ADDR_DCPORT,
5557		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5558
5559		return (0);
5560	}
5561
5562
5563	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >
5564	    NV_RETRY_RESET_SIG) {
5565
5566
5567		if (nvp->nvp_reset_retry_count >= NV_MAX_RESET_RETRY) {
5568
5569			nvp->nvp_state |= NV_FAILED;
5570			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);
5571
5572			DTRACE_PROBE(reset_exceeded_waiting_for_sig_p);
5573			DTRACE_PROBE(reset_exceeded_waiting_for_sig_f);
5574			DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
5575			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5576			    "reset time exceeded waiting for sig nvp_state %x",
5577			    nvp->nvp_state);
5578
5579			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
5580			    SATA_ADDR_CPORT, 0);
5581
5582			cv_signal(&nvp->nvp_reset_cv);
5583
5584			return (0);
5585		}
5586
5587		nv_reset(nvp, "retry");
5588
5589		return (nvp->nvp_wait_sig);
5590	}
5591
5592	/*
5593	 * signature not received, keep trying
5594	 */
5595	DTRACE_PROBE(no_sig_keep_waiting_p);
5596
5597	/*
5598	 * double the wait time for sig since the last try but cap it off at
5599	 * 1 second.
5600	 */
5601	nvp->nvp_wait_sig = nvp->nvp_wait_sig * 2;
5602
5603	return (nvp->nvp_wait_sig > NV_ONE_SEC ? NV_ONE_SEC :
5604	    nvp->nvp_wait_sig);
5605}
5606
5607
5608/*
5609 * timeout processing:
5610 *
5611 * Check if any packets have crossed a timeout threshold.  If so,
5612 * abort the packet.  This function is not NCQ-aware.
5613 *
5614 * If reset is in progress, call reset monitoring function.
5615 *
5616 * Timeout frequency may be lower for checking packet timeout
5617 * and higher for reset monitoring.
5618 *
5619 */
5620static void
5621nv_timeout(void *arg)
5622{
5623	nv_port_t *nvp = arg;
5624	nv_slot_t *nv_slotp;
5625	clock_t next_timeout_us = NV_ONE_SEC;
5626	uint16_t int_status;
5627	uint8_t status, bmstatus;
5628	static int intr_warn_once = 0;
5629	uint32_t serror;
5630
5631
5632	ASSERT(nvp != NULL);
5633
5634	mutex_enter(&nvp->nvp_mutex);
5635	nvp->nvp_timeout_id = 0;
5636
5637	if (nvp->nvp_state & (NV_DEACTIVATED|NV_FAILED)) {
5638		next_timeout_us = 0;
5639
5640		goto finished;
5641	}
5642
5643	if (nvp->nvp_state & NV_RESET) {
5644		next_timeout_us = nv_monitor_reset(nvp);
5645
5646		goto finished;
5647	}
5648
5649	if (nvp->nvp_state & NV_LINK_EVENT) {
5650		boolean_t device_present = B_FALSE;
5651		uint32_t sstatus;
5652		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5653
5654		if (TICK_TO_USEC(ddi_get_lbolt() -
5655		    nvp->nvp_link_event_time) < NV_LINK_EVENT_SETTLE) {
5656
5657			next_timeout_us = 10 * NV_ONE_MSEC;
5658
5659			DTRACE_PROBE(link_event_set_no_timeout_keep_waiting_p);
5660
5661			goto finished;
5662		}
5663
5664		DTRACE_PROBE(link_event_settled_now_process_p);
5665
5666		nvp->nvp_state &= ~NV_LINK_EVENT;
5667
5668		/*
5669		 * ck804 routinely reports the wrong hotplug/unplug event,
5670		 * and it's been seen on mcp55 when there are signal integrity
5671		 * issues.  Therefore need to infer the event from the
5672		 * current link status.
5673		 */
5674
5675		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5676
5677		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
5678		    (SSTATUS_GET_DET(sstatus) ==
5679		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5680			device_present = B_TRUE;
5681		}
5682
5683		if ((nvp->nvp_signature != NV_NO_SIG) &&
5684		    (device_present == B_FALSE)) {
5685
5686			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5687			    "nv_timeout: device detached", NULL);
5688
5689			DTRACE_PROBE(device_detached_p);
5690
5691			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5692			    B_FALSE);
5693
5694			nv_port_state_change(nvp, SATA_EVNT_DEVICE_DETACHED,
5695			    SATA_ADDR_CPORT, 0);
5696
5697			nvp->nvp_signature = NV_NO_SIG;
5698			nvp->nvp_rem_time = ddi_get_lbolt();
5699			nvp->nvp_type = SATA_DTYPE_NONE;
5700			next_timeout_us = 0;
5701
5702#ifdef SGPIO_SUPPORT
5703			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5704			    SGP_CTLR_PORT_TO_DRV(nvp->nvp_ctlp->nvc_ctlr_num,
5705			    nvp->nvp_port_num));
5706#endif
5707
5708			goto finished;
5709		}
5710
5711		/*
5712		 * if the device was already present, and it's still present,
5713		 * then abort any outstanding command and issue a reset.
5714		 * This may result from transient link errors.
5715		 */
5716
5717		if ((nvp->nvp_signature != NV_NO_SIG) &&
5718		    (device_present == B_TRUE)) {
5719
5720			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5721			    "nv_timeout: spurious link event", NULL);
5722			DTRACE_PROBE(spurious_link_event_p);
5723
5724			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5725			    B_FALSE);
5726
5727			nvp->nvp_signature = NV_NO_SIG;
5728			nvp->nvp_trans_link_time = ddi_get_lbolt();
5729			nvp->nvp_trans_link_count++;
5730			next_timeout_us = 0;
5731
5732			nv_reset(nvp, "transient link event");
5733
5734			goto finished;
5735		}
5736
5737
5738		/*
5739		 * a new device has been inserted
5740		 */
5741		if ((nvp->nvp_signature == NV_NO_SIG) &&
5742		    (device_present == B_TRUE)) {
5743			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5744			    "nv_timeout: device attached", NULL);
5745
5746			DTRACE_PROBE(device_attached_p);
5747			nvp->nvp_add_time = ddi_get_lbolt();
5748			next_timeout_us = 0;
5749			nvp->nvp_reset_count = 0;
5750			nvp->nvp_state = NV_HOTPLUG;
5751			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5752			nv_reset(nvp, "hotplug");
5753
5754			goto finished;
5755		}
5756
5757		/*
5758		 * no link, and no prior device.  Nothing to do, but
5759		 * log this.
5760		 */
5761		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5762		    "nv_timeout: delayed hot processing no link no prior"
5763		    " device", NULL);
5764		DTRACE_PROBE(delayed_hotplug_no_link_no_prior_device_p);
5765
5766		nvp->nvp_trans_link_time = ddi_get_lbolt();
5767		nvp->nvp_trans_link_count++;
5768		next_timeout_us = 0;
5769
5770		goto finished;
5771	}
5772
5773	/*
5774	 * Not yet NCQ-aware - there is only one command active.
5775	 */
5776	nv_slotp = &(nvp->nvp_slot[0]);
5777
5778	/*
5779	 * perform timeout checking and processing only if there is an
5780	 * active packet on the port
5781	 */
5782	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5783		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5784		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5785		uint8_t cmd = satacmd->satacmd_cmd_reg;
5786		uint64_t lba;
5787
5788#if ! defined(__lock_lint) && defined(DEBUG)
5789
5790		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5791		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5792		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5793		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5794		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5795		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5796#endif
5797
5798		/*
5799		 * timeout not needed if there is a polling thread
5800		 */
5801		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5802			next_timeout_us = 0;
5803
5804			goto finished;
5805		}
5806
5807		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5808		    spkt->satapkt_time) {
5809
5810			serror = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5811			    nvp->nvp_serror);
5812			status = nv_get8(nvp->nvp_ctl_hdl,
5813			    nvp->nvp_altstatus);
5814			bmstatus = nv_get8(nvp->nvp_bm_hdl,
5815			    nvp->nvp_bmisx);
5816
5817			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5818			    "nv_timeout: aborting: "
5819			    "nvslot_stime: %ld max ticks till timeout: %ld "
5820			    "cur_time: %ld cmd = 0x%x lba = %d seq = %d",
5821			    nv_slotp->nvslot_stime,
5822			    drv_usectohz(MICROSEC *
5823			    spkt->satapkt_time), ddi_get_lbolt(),
5824			    cmd, lba, nvp->nvp_seq);
5825
5826			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5827			    "nv_timeout: altstatus = 0x%x  bmicx = 0x%x "
5828			    "serror = 0x%x previous_cmd = "
5829			    "0x%x", status, bmstatus, serror,
5830			    nvp->nvp_previous_cmd);
5831
5832
5833			DTRACE_PROBE1(nv_timeout_packet_p, int, nvp);
5834
5835			if (nvp->nvp_mcp5x_int_status != NULL) {
5836
5837				int_status = nv_get16(
5838				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5839				    nvp->nvp_mcp5x_int_status);
5840				NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5841				    "int_status = 0x%x", int_status);
5842
5843				if (int_status & MCP5X_INT_COMPLETE) {
5844					/*
5845					 * Completion interrupt was missed.
5846					 * Issue warning message once.
5847					 */
5848					if (!intr_warn_once) {
5849
5850						nv_cmn_err(CE_WARN,
5851						    nvp->nvp_ctlp,
5852						    nvp,
5853						    "nv_sata: missing command "
5854						    "completion interrupt");
5855						intr_warn_once = 1;
5856
5857					}
5858
5859					NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5860					    nvp, "timeout detected with "
5861					    "interrupt ready - calling "
5862					    "int directly", NULL);
5863
5864					mutex_exit(&nvp->nvp_mutex);
5865					(void) mcp5x_intr_port(nvp);
5866					mutex_enter(&nvp->nvp_mutex);
5867
5868				} else {
5869					/*
5870					 * True timeout and not a missing
5871					 * interrupt.
5872					 */
5873					DTRACE_PROBE1(timeout_abort_active_p,
5874					    int *, nvp);
5875					(void) nv_abort_active(nvp, spkt,
5876					    SATA_PKT_TIMEOUT, B_TRUE);
5877				}
5878			} else {
5879				(void) nv_abort_active(nvp, spkt,
5880				    SATA_PKT_TIMEOUT, B_TRUE);
5881			}
5882
5883		} else {
5884			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5885			    "nv_timeout:"
5886			    " still in use so restarting timeout",
5887			    NULL);
5888
5889			next_timeout_us = NV_ONE_SEC;
5890		}
5891	} else {
5892		/*
5893		 * there was no active packet, so do not re-enable timeout
5894		 */
5895		next_timeout_us = 0;
5896		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5897		    "nv_timeout: no active packet so not re-arming "
5898		    "timeout", NULL);
5899	}
5900
5901finished:
5902
5903	nv_setup_timeout(nvp, next_timeout_us);
5904
5905	mutex_exit(&nvp->nvp_mutex);
5906}
5907
5908
5909/*
5910 * enable or disable the 3 interrupt types the driver is
5911 * interested in: completion, add and remove.
5912 */
5913static void
5914ck804_set_intr(nv_port_t *nvp, int flag)
5915{
5916	nv_ctl_t *nvc = nvp->nvp_ctlp;
5917	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5918	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5919	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5920	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5921	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5922	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5923
5924	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5925		int_en = nv_get8(bar5_hdl,
5926		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5927		int_en &= ~intr_bits[port];
5928		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5929		    int_en);
5930		return;
5931	}
5932
5933	ASSERT(mutex_owned(&nvp->nvp_mutex));
5934
5935	/*
5936	 * controller level lock also required since access to an 8-bit
5937	 * interrupt register is shared between both channels.
5938	 */
5939	mutex_enter(&nvc->nvc_mutex);
5940
5941	if (flag & NV_INTR_CLEAR_ALL) {
5942		NVLOG(NVDBG_INTR, nvc, nvp,
5943		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);
5944
5945		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5946		    (uint8_t *)(nvc->nvc_ck804_int_status));
5947
5948		if (intr_status & clear_all_bits[port]) {
5949
5950			nv_put8(nvc->nvc_bar_hdl[5],
5951			    (uint8_t *)(nvc->nvc_ck804_int_status),
5952			    clear_all_bits[port]);
5953
5954			NVLOG(NVDBG_INTR, nvc, nvp,
5955			    "interrupt bits cleared %x",
5956			    intr_status & clear_all_bits[port]);
5957		}
5958	}
5959
5960	if (flag & NV_INTR_DISABLE) {
5961		NVLOG(NVDBG_INTR, nvc, nvp,
5962		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
5963		int_en = nv_get8(bar5_hdl,
5964		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5965		int_en &= ~intr_bits[port];
5966		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5967		    int_en);
5968	}
5969
5970	if (flag & NV_INTR_ENABLE) {
5971		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
5972		    NULL);
5973		int_en = nv_get8(bar5_hdl,
5974		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5975		int_en |= intr_bits[port];
5976		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5977		    int_en);
5978	}
5979
5980	mutex_exit(&nvc->nvc_mutex);
5981}
5982
5983
5984/*
5985 * enable or disable the 3 interrupts the driver is interested in:
5986 * completion interrupt, hot add, and hot remove interrupt.
5987 */
5988static void
5989mcp5x_set_intr(nv_port_t *nvp, int flag)
5990{
5991	nv_ctl_t *nvc = nvp->nvp_ctlp;
5992	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5993	uint16_t intr_bits =
5994	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5995	uint16_t int_en;
5996
5997	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5998		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5999		int_en &= ~intr_bits;
6000		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6001		return;
6002	}
6003
6004	ASSERT(mutex_owned(&nvp->nvp_mutex));
6005
6006	NVLOG(NVDBG_INTR, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag);
6007
6008	if (flag & NV_INTR_CLEAR_ALL) {
6009		NVLOG(NVDBG_INTR, nvc, nvp,
6010		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
6011		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
6012	}
6013
6014	if (flag & NV_INTR_ENABLE) {
6015		NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
6016		    NULL);
6017		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6018		int_en |= intr_bits;
6019		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6020	}
6021
6022	if (flag & NV_INTR_DISABLE) {
6023		NVLOG(NVDBG_INTR, nvc, nvp,
6024		    "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
6025		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6026		int_en &= ~intr_bits;
6027		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6028	}
6029}
6030
6031
6032static void
6033nv_resume(nv_port_t *nvp)
6034{
6035	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
6036
6037	mutex_enter(&nvp->nvp_mutex);
6038
6039	if (nvp->nvp_state & NV_DEACTIVATED) {
6040		mutex_exit(&nvp->nvp_mutex);
6041
6042		return;
6043	}
6044
6045	/* Enable interrupt */
6046	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
6047
6048	/*
6049	 * Power may have been removed to the port and the
6050	 * drive, and/or a drive may have been added or removed.
6051	 * Force a reset which will cause a probe and re-establish
6052	 * any state needed on the drive.
6053	 */
6054	nv_reset(nvp, "resume");
6055
6056	mutex_exit(&nvp->nvp_mutex);
6057}
6058
6059
6060static void
6061nv_suspend(nv_port_t *nvp)
6062{
6063	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
6064
6065	mutex_enter(&nvp->nvp_mutex);
6066
6067#ifdef SGPIO_SUPPORT
6068	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
6069		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6070		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6071	}
6072#endif
6073
6074	if (nvp->nvp_state & NV_DEACTIVATED) {
6075		mutex_exit(&nvp->nvp_mutex);
6076
6077		return;
6078	}
6079
6080	/*
6081	 * Stop the timeout handler.
6082	 * (It will be restarted in nv_reset() during nv_resume().)
6083	 */
6084	if (nvp->nvp_timeout_id) {
6085		(void) untimeout(nvp->nvp_timeout_id);
6086		nvp->nvp_timeout_id = 0;
6087	}
6088
6089	/* Disable interrupt */
6090	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6091	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6092
6093	mutex_exit(&nvp->nvp_mutex);
6094}
6095
6096
6097static void
6098nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6099{
6100	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6101	sata_cmd_t *scmd = &spkt->satapkt_cmd;
6102	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6103	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6104	uchar_t status;
6105	struct sata_cmd_flags flags;
6106
6107	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6108	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6109	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6110
6111	if (spkt == NULL) {
6112
6113		return;
6114	}
6115
6116	/*
6117	 * in the error case, implicitly set the return of regs needed
6118	 * for error handling.
6119	 */
6120	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6121	    nvp->nvp_altstatus);
6122
6123	flags = scmd->satacmd_flags;
6124
6125	if (status & SATA_STATUS_ERR) {
6126		flags.sata_copy_out_lba_low_msb = B_TRUE;
6127		flags.sata_copy_out_lba_mid_msb = B_TRUE;
6128		flags.sata_copy_out_lba_high_msb = B_TRUE;
6129		flags.sata_copy_out_lba_low_lsb = B_TRUE;
6130		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6131		flags.sata_copy_out_lba_high_lsb = B_TRUE;
6132		flags.sata_copy_out_error_reg = B_TRUE;
6133		flags.sata_copy_out_sec_count_msb = B_TRUE;
6134		flags.sata_copy_out_sec_count_lsb = B_TRUE;
6135		scmd->satacmd_status_reg = status;
6136	}
6137
6138	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6139
6140		/*
6141		 * set HOB so that high byte will be read
6142		 */
6143		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6144
6145		/*
6146		 * get the requested high bytes
6147		 */
6148		if (flags.sata_copy_out_sec_count_msb) {
6149			scmd->satacmd_sec_count_msb =
6150			    nv_get8(cmdhdl, nvp->nvp_count);
6151		}
6152
6153		if (flags.sata_copy_out_lba_low_msb) {
6154			scmd->satacmd_lba_low_msb =
6155			    nv_get8(cmdhdl, nvp->nvp_sect);
6156		}
6157
6158		if (flags.sata_copy_out_lba_mid_msb) {
6159			scmd->satacmd_lba_mid_msb =
6160			    nv_get8(cmdhdl, nvp->nvp_lcyl);
6161		}
6162
6163		if (flags.sata_copy_out_lba_high_msb) {
6164			scmd->satacmd_lba_high_msb =
6165			    nv_get8(cmdhdl, nvp->nvp_hcyl);
6166		}
6167	}
6168
6169	/*
6170	 * disable HOB so that low byte is read
6171	 */
6172	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6173
6174	/*
6175	 * get the requested low bytes
6176	 */
6177	if (flags.sata_copy_out_sec_count_lsb) {
6178		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6179	}
6180
6181	if (flags.sata_copy_out_lba_low_lsb) {
6182		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6183	}
6184
6185	if (flags.sata_copy_out_lba_mid_lsb) {
6186		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6187	}
6188
6189	if (flags.sata_copy_out_lba_high_lsb) {
6190		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6191	}
6192
6193	/*
6194	 * get the device register if requested
6195	 */
6196	if (flags.sata_copy_out_device_reg) {
6197		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6198	}
6199
6200	/*
6201	 * get the error register if requested
6202	 */
6203	if (flags.sata_copy_out_error_reg) {
6204		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6205	}
6206}
6207
6208
6209/*
6210 * hot plug and remove interrupts can occur when the device is reset.
6211 * Masking the interrupt doesn't always work well because if a
6212 * different interrupt arrives on the other port, the driver can still
6213 * end up checking the state of the other port and discover the hot
6214 * interrupt flag is set even though it was masked.  Also, when there are
6215 * errors on the link there can be transient link events which need to be
6216 * masked and eliminated as well.
6217 */
6218static void
6219nv_link_event(nv_port_t *nvp, int flag)
6220{
6221
6222	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_link_event: flag: %s",
6223	    flag ? "add" : "remove");
6224
6225	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
6226
6227	nvp->nvp_link_event_time = ddi_get_lbolt();
6228
6229	/*
6230	 * if a port has been deactivated, ignore all link events
6231	 */
6232	if (nvp->nvp_state & NV_DEACTIVATED) {
6233		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6234		    " port deactivated", NULL);
6235		DTRACE_PROBE(ignoring_link_port_deactivated_p);
6236
6237		return;
6238	}
6239
6240	/*
6241	 * if the drive has been reset, ignore any transient events.  If it's
6242	 * a real removal event, nv_monitor_reset() will handle it.
6243	 */
6244	if (nvp->nvp_state & NV_RESET) {
6245		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6246		    " during reset", NULL);
6247		DTRACE_PROBE(ignoring_link_event_during_reset_p);
6248
6249		return;
6250	}
6251
6252	/*
6253	 * if link event processing is already enabled, nothing to
6254	 * do.
6255	 */
6256	if (nvp->nvp_state & NV_LINK_EVENT) {
6257
6258		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6259		    "received link event while processing already in "
6260		    "progress", NULL);
6261		DTRACE_PROBE(nv_link_event_already_set_p);
6262
6263		return;
6264	}
6265
6266	DTRACE_PROBE1(link_event_p, int, nvp);
6267
6268	nvp->nvp_state |= NV_LINK_EVENT;
6269
6270	nv_setup_timeout(nvp, NV_LINK_EVENT_SETTLE);
6271}
6272
6273
6274/*
6275 * Get request sense data and stuff it the command's sense buffer.
6276 * Start a request sense command in order to get sense data to insert
6277 * in the sata packet's rqsense buffer.  The command completion
6278 * processing is in nv_intr_pkt_pio.
6279 *
6280 * The sata common module provides a function to allocate and set-up a
6281 * request sense packet command. The reasons it is not being used here is:
6282 * a) it cannot be called in an interrupt context and this function is
6283 *    called in an interrupt context.
6284 * b) it allocates DMA resources that are not used here because this is
6285 *    implemented using PIO.
6286 *
6287 * If, in the future, this is changed to use DMA, the sata common module
6288 * should be used to allocate and set-up the error retrieval (request sense)
6289 * command.
6290 */
6291static int
6292nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6293{
6294	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6295	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6296	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6297	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6298
6299	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6300	    "nv_start_rqsense_pio: start", NULL);
6301
6302	/* clear the local request sense buffer before starting the command */
6303	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6304
6305	/* Write the request sense PACKET command */
6306
6307	/* select the drive */
6308	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6309
6310	/* make certain the drive selected */
6311	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6312	    NV_SEC2USEC(5), 0) == B_FALSE) {
6313		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6314		    "nv_start_rqsense_pio: drive select failed", NULL);
6315		return (NV_FAILURE);
6316	}
6317
6318	/* set up the command */
6319	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6320	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6321	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6322	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6323	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6324
6325	/* initiate the command by writing the command register last */
6326	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6327
6328	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6329	NV_DELAY_NSEC(400);
6330
6331	/*
6332	 * Wait for the device to indicate that it is ready for the command
6333	 * ATAPI protocol state - HP0: Check_Status_A
6334	 */
6335
6336	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6337	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6338	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6339	    4000000, 0) == B_FALSE) {
6340		if (nv_get8(cmdhdl, nvp->nvp_status) &
6341		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6342			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
6343			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6344			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
6345			    NULL);
6346		} else {
6347			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
6348			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6349			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
6350			    NULL);
6351		}
6352
6353		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6354		nv_complete_io(nvp, spkt, 0);
6355		nv_reset(nvp, "rqsense_pio");
6356
6357		return (NV_FAILURE);
6358	}
6359
6360	/*
6361	 * Put the ATAPI command in the data register
6362	 * ATAPI protocol state - HP1: Send_Packet
6363	 */
6364
6365	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6366	    (ushort_t *)nvp->nvp_data,
6367	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6368
6369	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6370	    "nv_start_rqsense_pio: exiting into HP3", NULL);
6371
6372	return (NV_SUCCESS);
6373}
6374
6375/*
6376 * quiesce(9E) entry point.
6377 *
6378 * This function is called when the system is single-threaded at high
6379 * PIL with preemption disabled. Therefore, this function must not be
6380 * blocked.
6381 *
6382 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6383 * DDI_FAILURE indicates an error condition and should almost never happen.
6384 */
6385static int
6386nv_quiesce(dev_info_t *dip)
6387{
6388	int port, instance = ddi_get_instance(dip);
6389	nv_ctl_t *nvc;
6390
6391	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6392		return (DDI_FAILURE);
6393
6394	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6395		nv_port_t *nvp = &(nvc->nvc_port[port]);
6396		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6397		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6398		uint32_t sctrl;
6399
6400		/*
6401		 * Stop the controllers from generating interrupts.
6402		 */
6403		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6404
6405		/*
6406		 * clear signature registers
6407		 */
6408		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6409		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6410		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6411		nv_put8(cmdhdl, nvp->nvp_count, 0);
6412
6413		nvp->nvp_signature = NV_NO_SIG;
6414		nvp->nvp_type = SATA_DTYPE_NONE;
6415		nvp->nvp_state |= NV_RESET;
6416		nvp->nvp_reset_time = ddi_get_lbolt();
6417
6418		/*
6419		 * assert reset in PHY by writing a 1 to bit 0 scontrol
6420		 */
6421		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6422
6423		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6424		    sctrl | SCONTROL_DET_COMRESET);
6425
6426		/*
6427		 * wait 1ms
6428		 */
6429		drv_usecwait(1000);
6430
6431		/*
6432		 * de-assert reset in PHY
6433		 */
6434		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6435	}
6436
6437	return (DDI_SUCCESS);
6438}
6439
6440
6441#ifdef SGPIO_SUPPORT
6442/*
6443 * NVIDIA specific SGPIO LED support
6444 * Please refer to the NVIDIA documentation for additional details
6445 */
6446
6447/*
6448 * nv_sgp_led_init
6449 * Detect SGPIO support.  If present, initialize.
6450 */
6451static void
6452nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6453{
6454	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6455	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6456	nv_sgp_cmn_t *cmn;	/* shared data structure */
6457	int i;
6458	char tqname[SGPIO_TQ_NAME_LEN];
6459	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6460
6461	/*
6462	 * Initialize with appropriately invalid values in case this function
6463	 * exits without initializing SGPIO (for example, there is no SGPIO
6464	 * support).
6465	 */
6466	nvc->nvc_sgp_csr = 0;
6467	nvc->nvc_sgp_cbp = NULL;
6468	nvc->nvc_sgp_cmn = NULL;
6469
6470	/*
6471	 * Only try to initialize SGPIO LED support if this property
6472	 * indicates it should be.
6473	 */
6474	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6475	    "enable-sgpio-leds", 0) != 1)
6476		return;
6477
6478	/*
6479	 * CK804 can pass the sgpio_detect test even though it does not support
6480	 * SGPIO, so don't even look at a CK804.
6481	 */
6482	if (nvc->nvc_mcp5x_flag != B_TRUE)
6483		return;
6484
6485	/*
6486	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6487	 * However, the current implementation only supports 4 drives.
6488	 * With two drives per controller, that means only look at the
6489	 * first two controllers.
6490	 */
6491	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6492		return;
6493
6494	/* confirm that the SGPIO registers are there */
6495	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6496		NVLOG(NVDBG_INIT, nvc, NULL,
6497		    "SGPIO registers not detected", NULL);
6498		return;
6499	}
6500
6501	/* save off the SGPIO_CSR I/O address */
6502	nvc->nvc_sgp_csr = csrp;
6503
6504	/* map in Control Block */
6505	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6506	    sizeof (nv_sgp_cb_t), PROT_READ |