1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include "sdhost.h"
27 
typedef	struct sdslot	sdslot_t;
typedef	struct sdhost	sdhost_t;

/*
 * Per slot state.  All fields are protected by ss_lock except those
 * set once at initialization time (handles, capabilities).
 */
struct sdslot {
	sda_host_t		*ss_host;	/* SDA framework host handle */
	int			ss_num;		/* slot number on controller */
	ddi_acc_handle_t	ss_acch;	/* register access handle */
	caddr_t 		ss_regva;	/* mapped register base */
	kmutex_t		ss_lock;	/* protects slot state */
	uint32_t		ss_capab;	/* cached CAPAB register */
	uint32_t		ss_baseclk;	/* Hz */
	uint32_t		ss_cardclk;	/* Hz */
	uint8_t			ss_tmoutclk;	/* data timeout exponent */
	uint32_t		ss_tmusecs;	/* timeout units in usecs */
	uint32_t		ss_ocr;		/* OCR formatted voltages */
	uint16_t		ss_mode;	/* XFR_MODE_* flags */
	boolean_t		ss_suspended;	/* B_TRUE while suspended */

	/*
	 * Command in progress
	 */
	uint8_t			*ss_kvaddr;	/* PIO data buffer address */
	ddi_dma_cookie_t	*ss_dmacs;	/* remaining DMA cookies */
	uint_t			ss_ndmac;	/* count of cookies left */
	int			ss_blksz;	/* block size in bytes */
	uint16_t		ss_resid;	/* in blocks */

	/* scratch buffer, to receive extra PIO data */
	uint32_t		ss_bounce[2048 / 4];
};
61 
/*
 * Per controller state.
 */
struct sdhost {
	int			sh_numslots;	/* slots on this device */
	ddi_dma_attr_t		sh_dmaattr;	/* DMA engine attributes */
	sdslot_t		sh_slots[SDHOST_MAXSLOTS];	/* slot state */
	sda_host_t		*sh_host;	/* SDA framework handle */

	/*
	 * Interrupt related information.  A single interrupt serves
	 * all slots of the controller.
	 */
	ddi_intr_handle_t	sh_ihandle;	/* interrupt handle */
	int			sh_icap;	/* interrupt capabilities */
	uint_t			sh_ipri;	/* interrupt priority */
};
78 
79 
/* DDI entry points */
static int sdhost_attach(dev_info_t *, ddi_attach_cmd_t);
static int sdhost_detach(dev_info_t *, ddi_detach_cmd_t);
static int sdhost_suspend(dev_info_t *);
static int sdhost_resume(dev_info_t *);

/* interrupt and per-slot helpers */
static void sdhost_enable_interrupts(sdslot_t *);
static void sdhost_disable_interrupts(sdslot_t *);
static int sdhost_setup_intr(dev_info_t *, sdhost_t *);
static uint_t sdhost_intr(caddr_t, caddr_t);
static int sdhost_init_slot(dev_info_t *, sdhost_t *, int, int);
static void sdhost_uninit_slot(sdhost_t *, int);
static sda_err_t sdhost_soft_reset(sdslot_t *, uint8_t);
static sda_err_t sdhost_set_clock(sdslot_t *, uint32_t);
static void sdhost_xfer_done(sdslot_t *, sda_err_t);
static sda_err_t sdhost_wait_cmd(sdslot_t *, sda_cmd_t *);
static uint_t sdhost_slot_intr(sdslot_t *);

/* entry points used by the SDA framework (see sdhost_ops) */
static sda_err_t sdhost_cmd(void *, sda_cmd_t *);
static sda_err_t sdhost_getprop(void *, sda_prop_t, uint32_t *);
static sda_err_t sdhost_setprop(void *, sda_prop_t, uint32_t);
static sda_err_t sdhost_poll(void *);
static sda_err_t sdhost_reset(void *);
static sda_err_t sdhost_halt(void *);
103 
/*
 * Device operations vector.  This structure is further adjusted by
 * sda_host_init_ops() at _init() time; the driver itself exports no
 * cb_ops of its own.
 */
static struct dev_ops sdhost_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	ddi_no_info,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	sdhost_attach,			/* devo_attach */
	sdhost_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	NULL,				/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	NULL				/* devo_power */
};
117 
/* loadable module linkage for a driver module */
static struct modldrv sdhost_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Standard SD Host Controller",	/* drv_linkinfo */
	&sdhost_dev_ops			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	{ &sdhost_modldrv, NULL }	/* ml_linkage */
};
128 
/*
 * Host operations vector handed to the SDA framework; these are the
 * entry points the framework uses to drive the controller.
 */
static struct sda_ops sdhost_ops = {
	SDA_OPS_VERSION,
	sdhost_cmd,			/* so_cmd */
	sdhost_getprop,			/* so_getprop */
	sdhost_setprop,			/* so_setprop */
	sdhost_poll,			/* so_poll */
	sdhost_reset,			/* so_reset */
	sdhost_halt,			/* so_halt */
};
138 
/*
 * Register access attributes: SD Host registers are little-endian
 * and must be accessed in strict program order.
 */
static ddi_device_acc_attr_t sdhost_regattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC,	/* devacc_attr_access */
};
145 
/*
 * Register accessors.  "reg" is a byte offset from the slot's mapped
 * register base; byte swapping (if any) is handled by the access
 * handle, per sdhost_regattr above.
 */
#define	GET16(ss, reg)	\
	ddi_get16(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT16(ss, reg, val)	\
	ddi_put16(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define	GET32(ss, reg)	\
	ddi_get32(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT32(ss, reg, val)	\
	ddi_put32(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define	GET64(ss, reg)	\
	ddi_get64(ss->ss_acch, (void *)(ss->ss_regva + reg))

#define	GET8(ss, reg)	\
	ddi_get8(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT8(ss, reg, val)	\
	ddi_put8(ss->ss_acch, (void *)(ss->ss_regva + reg), val)

/* read-modify-write helpers for 8-bit registers */
#define	CLR8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) & ~(mask))
#define	SET8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) | (mask))

/*
 * If ever anyone uses PIO on SPARC, we have to endian-swap.  But we
 * think that SD Host Controllers are likely to be uncommon on SPARC,
 * and hopefully when they exist at all they will be able to use DMA.
 */
#ifdef	_BIG_ENDIAN
#define	sw32(x)		ddi_swap32(x)
#define	sw16(x)		ddi_swap16(x)
#else
#define	sw32(x)		(x)
#define	sw16(x)		(x)
#endif

/* PIO data port accessors, with swapping as discussed above */
#define	GETDATA32(ss)		sw32(GET32(ss, REG_DATA))
#define	GETDATA16(ss)		sw16(GET16(ss, REG_DATA))
#define	GETDATA8(ss)		GET8(ss, REG_DATA)

#define	PUTDATA32(ss, val)	PUT32(ss, REG_DATA, sw32(val))
#define	PUTDATA16(ss, val)	PUT16(ss, REG_DATA, sw16(val))
#define	PUTDATA8(ss, val)	PUT8(ss, REG_DATA, val)

/* test a bit (PRS_<nm>) in the slot's present state register */
#define	CHECK_STATE(ss, nm)	\
	((GET32(ss, REG_PRS) & PRS_ ## nm) != 0)
188 
189 int
190 _init(void)
191 {
192 	int	rv;
193 
194 	sda_host_init_ops(&sdhost_dev_ops);
195 
196 	if ((rv = mod_install(&modlinkage)) != 0) {
197 		sda_host_fini_ops(&sdhost_dev_ops);
198 	}
199 
200 	return (rv);
201 }
202 
203 int
204 _fini(void)
205 {
206 	int	rv;
207 
208 	if ((rv = mod_remove(&modlinkage)) == 0) {
209 		sda_host_fini_ops(&sdhost_dev_ops);
210 	}
211 	return (rv);
212 }
213 
214 int
215 _info(struct modinfo *modinfop)
216 {
217 	return (mod_info(&modlinkage, modinfop));
218 }
219 
220 int
221 sdhost_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
222 {
223 	sdhost_t		*shp;
224 	ddi_acc_handle_t	pcih;
225 	uint8_t			slotinfo;
226 	uint8_t			bar;
227 	int			i;
228 
229 	switch (cmd) {
230 	case DDI_ATTACH:
231 		break;
232 
233 	case DDI_RESUME:
234 		return (sdhost_resume(dip));
235 
236 	default:
237 		return (DDI_FAILURE);
238 	}
239 
240 	/*
241 	 * Soft state allocation.
242 	 */
243 	shp = kmem_zalloc(sizeof (*shp), KM_SLEEP);
244 	ddi_set_driver_private(dip, shp);
245 
246 	/*
247 	 * Initialize DMA attributes.  For now we initialize as for
248 	 * SDMA.  If we add ADMA support we can improve this.
249 	 */
250 	shp->sh_dmaattr.dma_attr_version = DMA_ATTR_V0;
251 	shp->sh_dmaattr.dma_attr_addr_lo = 0;
252 	shp->sh_dmaattr.dma_attr_addr_hi = 0xffffffffU;
253 	shp->sh_dmaattr.dma_attr_count_max = 0xffffffffU;
254 	shp->sh_dmaattr.dma_attr_align = 1;
255 	shp->sh_dmaattr.dma_attr_burstsizes = 0;	/* for now! */
256 	shp->sh_dmaattr.dma_attr_minxfer = 1;
257 	shp->sh_dmaattr.dma_attr_maxxfer = 0xffffffffU;
258 	shp->sh_dmaattr.dma_attr_sgllen = -1;		/* unlimited! */
259 	shp->sh_dmaattr.dma_attr_seg = 0xfff;		/* 4K segments */
260 	shp->sh_dmaattr.dma_attr_granular = 1;
261 	shp->sh_dmaattr.dma_attr_flags = 0;
262 
263 	/*
264 	 * PCI configuration access to figure out number of slots present.
265 	 */
266 	if (pci_config_setup(dip, &pcih) != DDI_SUCCESS) {
267 		cmn_err(CE_WARN, "pci_config_setup failed");
268 		goto failed;
269 	}
270 
271 	slotinfo = pci_config_get8(pcih, SLOTINFO);
272 	shp->sh_numslots = SLOTINFO_NSLOT(slotinfo);
273 
274 	if (shp->sh_numslots > SDHOST_MAXSLOTS) {
275 		cmn_err(CE_WARN, "Host reports to have too many slots: %d",
276 		    shp->sh_numslots);
277 		goto failed;
278 	}
279 
280 	/*
281 	 * Enable master accesses and DMA.
282 	 */
283 	pci_config_put16(pcih, PCI_CONF_COMM,
284 	    pci_config_get16(pcih, PCI_CONF_COMM) |
285 	    PCI_COMM_MAE | PCI_COMM_ME);
286 
287 	/*
288 	 * Figure out which BAR to use.  Note that we number BARs from
289 	 * 1, although PCI and SD Host numbers from 0.  (We number
290 	 * from 1, because register number 0 means PCI configuration
291 	 * space in Solaris.)
292 	 */
293 	bar = SLOTINFO_BAR(slotinfo) + 1;
294 
295 	pci_config_teardown(&pcih);
296 
297 	/*
298 	 * Setup interrupts ... supports the new DDI interrupt API.  This
299 	 * will support MSI or MSI-X interrupts if a device is found to
300 	 * support it.
301 	 */
302 	if (sdhost_setup_intr(dip, shp) != DDI_SUCCESS) {
303 		cmn_err(CE_WARN, "Failed to setup interrupts");
304 		goto failed;
305 	}
306 
307 	shp->sh_host = sda_host_alloc(dip, shp->sh_numslots, &sdhost_ops,
308 	    &shp->sh_dmaattr);
309 	if (shp->sh_host == NULL) {
310 		cmn_err(CE_WARN, "Failed allocating SD host structure");
311 		goto failed;
312 	}
313 
314 	/*
315 	 * Configure slots, this also maps registers, enables
316 	 * interrupts, etc.  Most of the hardware setup is done here.
317 	 */
318 	for (i = 0; i < shp->sh_numslots; i++) {
319 		if (sdhost_init_slot(dip, shp, i, bar + i) != DDI_SUCCESS) {
320 			cmn_err(CE_WARN, "Failed initializing slot %d", i);
321 			goto failed;
322 		}
323 	}
324 
325 	ddi_report_dev(dip);
326 
327 	/*
328 	 * Enable device interrupts at the DDI layer.
329 	 */
330 	(void) ddi_intr_enable(shp->sh_ihandle);
331 
332 	/*
333 	 * Mark the slots online with the framework.  This will cause
334 	 * the framework to probe them for the presence of cards.
335 	 */
336 	if (sda_host_attach(shp->sh_host) != DDI_SUCCESS) {
337 		cmn_err(CE_WARN, "Failed attaching to SDA framework");
338 		(void) ddi_intr_disable(shp->sh_ihandle);
339 		goto failed;
340 	}
341 
342 	return (DDI_SUCCESS);
343 
344 failed:
345 	if (shp->sh_ihandle != NULL) {
346 		(void) ddi_intr_remove_handler(shp->sh_ihandle);
347 		(void) ddi_intr_free(shp->sh_ihandle);
348 	}
349 	for (i = 0; i < shp->sh_numslots; i++)
350 		sdhost_uninit_slot(shp, i);
351 	kmem_free(shp, sizeof (*shp));
352 
353 	return (DDI_FAILURE);
354 }
355 
356 int
357 sdhost_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
358 {
359 	sdhost_t	*shp;
360 	int		i;
361 
362 	switch (cmd) {
363 	case DDI_DETACH:
364 		break;
365 
366 	case DDI_SUSPEND:
367 		return (sdhost_suspend(dip));
368 
369 	default:
370 		return (DDI_FAILURE);
371 	}
372 
373 	shp = ddi_get_driver_private(dip);
374 	if (shp == NULL)
375 		return (DDI_FAILURE);
376 
377 	/*
378 	 * Take host offline with the framework.
379 	 */
380 	sda_host_detach(shp->sh_host);
381 
382 	/*
383 	 * Tear down interrupts.
384 	 */
385 	if (shp->sh_ihandle != NULL) {
386 		(void) ddi_intr_disable(shp->sh_ihandle);
387 		(void) ddi_intr_remove_handler(shp->sh_ihandle);
388 		(void) ddi_intr_free(shp->sh_ihandle);
389 	}
390 
391 	/*
392 	 * Tear down register mappings, etc.
393 	 */
394 	for (i = 0; i < shp->sh_numslots; i++)
395 		sdhost_uninit_slot(shp, i);
396 	kmem_free(shp, sizeof (*shp));
397 
398 	return (DDI_SUCCESS);
399 }
400 
401 int
402 sdhost_suspend(dev_info_t *dip)
403 {
404 	sdhost_t	*shp;
405 	sdslot_t	*ss;
406 	int		i;
407 
408 	shp = ddi_get_driver_private(dip);
409 	if (shp == NULL)
410 		return (DDI_FAILURE);
411 
412 	/* disable the interrupts */
413 	(void) ddi_intr_disable(shp->sh_ihandle);
414 
415 	for (i = 0; i < shp->sh_numslots; i++) {
416 		ss = &shp->sh_slots[i];
417 		mutex_enter(&ss->ss_lock);
418 		ss->ss_suspended = B_TRUE;
419 		sdhost_disable_interrupts(ss);
420 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
421 		mutex_exit(&ss->ss_lock);
422 	}
423 	return (DDI_SUCCESS);
424 }
425 
426 int
427 sdhost_resume(dev_info_t *dip)
428 {
429 	sdhost_t	*shp;
430 	sdslot_t	*ss;
431 	int		i;
432 
433 	shp = ddi_get_driver_private(dip);
434 	if (shp == NULL)
435 		return (DDI_FAILURE);
436 
437 	for (i = 0; i < shp->sh_numslots; i++) {
438 		ss = &shp->sh_slots[i];
439 		mutex_enter(&ss->ss_lock);
440 		ss->ss_suspended = B_FALSE;
441 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
442 		sdhost_enable_interrupts(ss);
443 		mutex_exit(&ss->ss_lock);
444 	}
445 
446 	/* re-enable the interrupts */
447 	(void) ddi_intr_enable(shp->sh_ihandle);
448 
449 	/* kick off a new card detect task */
450 	for (i = 0; i < shp->sh_numslots; i++) {
451 		ss = &shp->sh_slots[i];
452 		sda_host_detect(ss->ss_host, ss->ss_num);
453 	}
454 	return (DDI_SUCCESS);
455 }
456 
/*
 * Program the card clock as close to (but not above) the requested
 * frequency in Hz.  A request of 0 simply gates the clock off.
 *
 * Returns SDA_EOK on success, SDA_EINVAL if the base clock frequency
 * was never established, or SDA_ETIME if the internal clock fails to
 * stabilize.  On success ss_cardclk records the achieved frequency.
 */
sda_err_t
sdhost_set_clock(sdslot_t *ss, uint32_t hz)
{
	uint16_t	div;
	uint32_t	val;
	uint32_t	clk;
	int		count;

	/*
	 * Shut off the clock to begin.
	 */
	ss->ss_cardclk = 0;
	PUT16(ss, REG_CLOCK_CONTROL, 0);
	if (hz == 0) {
		return (SDA_EOK);
	}

	if (ss->ss_baseclk == 0) {
		sda_host_log(ss->ss_host, ss->ss_num,
		    "Base clock frequency not established.");
		return (SDA_EINVAL);
	}

	if ((hz > 25000000) && ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0)) {
		/* this clock requires high speed timings! */
		SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	} else {
		/* don't allow clock to run faster than 25MHz */
		hz = min(hz, 25000000);
		CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	}

	/*
	 * Figure out the divider: only power-of-two divisors (1..256)
	 * are available, so halve the base clock until it fits.
	 */
	clk = ss->ss_baseclk;
	div  = 1;
	while (clk > hz) {
		if (div > 0x80)
			break;
		clk >>= 1;	/* divide clock by two */
		div <<= 1;	/* divider goes up by one */
	}
	div >>= 1;	/* 0 == divide by 1, 1 = divide by 2 */

	/*
	 * Set the internal clock divider first, without enabling the
	 * card clock yet.
	 */
	PUT16(ss, REG_CLOCK_CONTROL,
	    (div << CLOCK_CONTROL_FREQ_SHIFT) | CLOCK_CONTROL_INT_CLOCK_EN);

	/*
	 * Wait up to 100 msec for the internal clock to stabilize.
	 * (The spec does not seem to indicate a maximum timeout, but
	 * it also suggests that an infinite loop be used, which is
	 * not appropriate for hardened Solaris drivers.)
	 */
	for (count = 100000; count; count -= 10) {

		val = GET16(ss, REG_CLOCK_CONTROL);

		if (val & CLOCK_CONTROL_INT_CLOCK_STABLE) {
			/* if clock is stable, enable the SD clock pin */
			PUT16(ss, REG_CLOCK_CONTROL, val |
			    CLOCK_CONTROL_SD_CLOCK_EN);

			ss->ss_cardclk = clk;
			return (SDA_EOK);
		}

		drv_usecwait(10);
	}

	return (SDA_ETIME);
}
531 
/*
 * Issue a soft reset for the given SOFT_RESET_* bits and wait (up to
 * ~100 msec, polling every 10 usec) for the controller to clear
 * them.  Returns SDA_EOK on completion or SDA_ETIME on timeout.
 */
sda_err_t
sdhost_soft_reset(sdslot_t *ss, uint8_t bits)
{
	int	count;

	/*
	 * There appears to be a bug where Ricoh hosts might have a
	 * problem if the host frequency is not set.  If the card
	 * isn't present, or we are doing a master reset, just enable
	 * the internal clock at its native speed.  (No dividers, and
	 * not exposed to card.).
	 */
	if ((bits == SOFT_RESET_ALL) || !(CHECK_STATE(ss, CARD_INSERTED))) {
		PUT16(ss, REG_CLOCK_CONTROL, CLOCK_CONTROL_INT_CLOCK_EN);
		/* simple 1msec wait, don't wait for clock to stabilize */
		drv_usecwait(1000);
	}

	/* the controller clears the bits as the reset completes */
	PUT8(ss, REG_SOFT_RESET, bits);
	for (count = 100000; count != 0; count -= 10) {
		if ((GET8(ss, REG_SOFT_RESET) & bits) == 0) {
			return (SDA_EOK);
		}
		drv_usecwait(10);
	}

	return (SDA_ETIME);
}
560 
/*
 * Mask and disable all normal and error interrupt sources for the
 * slot.  Caller holds ss_lock (or the slot is otherwise quiesced).
 */
void
sdhost_disable_interrupts(sdslot_t *ss)
{
	/* disable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, 0);
	PUT16(ss, REG_INT_EN, 0);

	/* disable error interrupts */
	PUT16(ss, REG_ERR_MASK, 0);
	PUT16(ss, REG_ERR_EN, 0);
}
572 
/*
 * Arm the slot's normal and error interrupt sources (card detect,
 * data transfer, and errors).  Command completion is polled rather
 * than interrupt driven -- see the note below.
 */
void
sdhost_enable_interrupts(sdslot_t *ss)
{
	/*
	 * Note that we want to enable reading of the CMD related
	 * bits, but we do not want them to generate an interrupt.
	 * (The busy wait for typical CMD stuff will normally be less
	 * than 10usec, so it's simpler/easier to just poll.  Even in
	 * the worst case of 100 kHz, the poll is at worst 2 msec.)
	 */

	/* enable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, INT_MASK);
	PUT16(ss, REG_INT_EN, INT_ENAB);

	/* enable error interrupts */
	PUT16(ss, REG_ERR_MASK, ERR_MASK);
	PUT16(ss, REG_ERR_EN, ERR_ENAB);
}
592 
/*
 * Allocate and register a single interrupt handler for the whole
 * controller, trying MSI-X first, then MSI, then fixed interrupts.
 * On success sh_ihandle, sh_ipri and sh_icap are valid; the
 * interrupt is NOT yet enabled at the DDI layer.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
sdhost_setup_intr(dev_info_t *dip, sdhost_t *shp)
{
	int		itypes;
	int		itype;

	/*
	 * Set up interrupt handler.
	 */
	if (ddi_intr_get_supported_types(dip, &itypes) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_supported_types failed");
		return (DDI_FAILURE);
	}

	/*
	 * Interrupt types are bits in a mask.  We know about these ones:
	 * FIXED = 1
	 * MSI = 2
	 * MSIX = 4
	 */
	for (itype = DDI_INTR_TYPE_MSIX; itype != 0; itype >>= 1) {

		int			count;

		if ((itypes & itype) == 0) {
			/* this type is not supported on this device! */
			continue;
		}

		if ((ddi_intr_get_nintrs(dip, itype, &count) != DDI_SUCCESS) ||
		    (count == 0)) {
			cmn_err(CE_WARN, "ddi_intr_get_nintrs failed");
			continue;
		}

		/*
		 * We have not seen a host device with multiple
		 * interrupts (one per slot?), and the spec does not
		 * indicate that they exist.  But if one ever occurs,
		 * we spew a warning to help future debugging/support
		 * efforts.
		 */
		if (count > 1) {
			cmn_err(CE_WARN, "Controller offers %d interrupts, "
			    "but driver only supports one", count);
			continue;
		}

		if ((ddi_intr_alloc(dip, &shp->sh_ihandle, itype, 0, 1,
		    &count, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS) ||
		    (count != 1)) {
			cmn_err(CE_WARN, "ddi_intr_alloc failed");
			continue;
		}

		/* the priority is needed for per-slot mutex init */
		if (ddi_intr_get_pri(shp->sh_ihandle, &shp->sh_ipri) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_pri failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (shp->sh_ipri >= ddi_intr_get_hilevel_pri()) {
			cmn_err(CE_WARN, "Hi level interrupt not supported");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_get_cap(shp->sh_ihandle, &shp->sh_icap) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_cap failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_add_handler(shp->sh_ihandle, sdhost_intr,
		    shp, NULL) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_add_handler failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
684 
685 void
686 sdhost_xfer_done(sdslot_t *ss, sda_err_t errno)
687 {
688 	if ((errno == SDA_EOK) && (ss->ss_resid != 0)) {
689 		/* an unexpected partial transfer was found */
690 		errno = SDA_ERESID;
691 	}
692 	ss->ss_blksz = 0;
693 	ss->ss_resid = 0;
694 
695 	if (errno != SDA_EOK) {
696 		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
697 		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
698 
699 		/* send a STOP command if necessary */
700 		if (ss->ss_mode & XFR_MODE_AUTO_CMD12) {
701 			PUT32(ss, REG_ARGUMENT, 0);
702 			PUT16(ss, REG_COMMAND,
703 			    (CMD_STOP_TRANSMIT << 8) |
704 			    COMMAND_TYPE_NORM | COMMAND_INDEX_CHECK_EN |
705 			    COMMAND_CRC_CHECK_EN | COMMAND_RESP_48_BUSY);
706 		}
707 	}
708 
709 	sda_host_transfer(ss->ss_host, ss->ss_num, errno);
710 }
711 
/*
 * Service all pending interrupt conditions on one slot: card
 * insert/remove, SDMA boundary crossings, PIO read/write data
 * movement, transfer completion, and errors.  Returns
 * DDI_INTR_CLAIMED if any condition was found, else
 * DDI_INTR_UNCLAIMED.  Called from the hard interrupt handler and
 * from sdhost_poll().
 */
uint_t
sdhost_slot_intr(sdslot_t *ss)
{
	uint16_t	intr;
	uint16_t	errs;
	uint8_t		*data;
	int		count;

	mutex_enter(&ss->ss_lock);

	/* a suspended slot cannot be the interrupt source */
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	intr = GET16(ss, REG_INT_STAT);
	if (intr == 0) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}
	errs = GET16(ss, REG_ERR_STAT);

	if (intr & (INT_REM | INT_INS)) {

		/* acknowledge everything and let the framework re-probe */
		PUT16(ss, REG_INT_STAT, intr);
		mutex_exit(&ss->ss_lock);

		sda_host_detect(ss->ss_host, ss->ss_num);
		/* no further interrupt processing this cycle */
		return (DDI_INTR_CLAIMED);
	}

	if (intr & INT_DMA) {
		/*
		 * We have crossed a DMA/page boundary.  Cope with it.
		 */
		if (ss->ss_ndmac) {
			/* advance to the next cookie and restart the DMA */
			ss->ss_ndmac--;
			ss->ss_dmacs++;
			PUT16(ss, REG_INT_STAT, INT_DMA);
			PUT32(ss, REG_SDMA_ADDR, ss->ss_dmacs->dmac_address);

		} else {
			/*
			 * Apparently some sdhost controllers issue a
			 * final DMA interrupt if the DMA completes on
			 * a boundary, even though there is no further
			 * data to transfer.
			 *
			 * There might be a risk here of the
			 * controller continuing to access the same
			 * data over and over again, but we accept the
			 * risk.
			 */
			PUT16(ss, REG_INT_STAT, INT_DMA);
		}
	}

	if (intr & INT_RD) {
		/*
		 * PIO read!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_RD);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_RD_EN)) {

			data = (void *)ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			/* drain the FIFO in the widest chunks possible */
			while (count >= sizeof (uint32_t)) {
				*(uint32_t *)(void *)data = GETDATA32(ss);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				*(uint16_t *)(void *)data = GETDATA16(ss);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				*(uint8_t *)data = GETDATA8(ss);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_blksz);
			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_WR) {
		/*
		 * PIO write!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_WR);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_WR_EN)) {

			data = (void *)ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			/* fill the FIFO in the widest chunks possible */
			bcopy(ss->ss_kvaddr, data, count);
			while (count >= sizeof (uint32_t)) {
				PUTDATA32(ss, *(uint32_t *)(void *)data);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				PUTDATA16(ss, *(uint16_t *)(void *)data);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				PUTDATA8(ss, *(uint8_t *)data);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_XFR) {
		PUT16(ss, REG_INT_STAT, INT_XFR);

		sdhost_xfer_done(ss, SDA_EOK);
	}

	if (intr & INT_ERR) {
		PUT16(ss, REG_ERR_STAT, errs);
		PUT16(ss, REG_INT_STAT, INT_ERR);

		if (errs & ERR_DAT) {
			/* map the data error bits to SDA error codes */
			if ((errs & ERR_DAT_END) == ERR_DAT_END) {
				sdhost_xfer_done(ss, SDA_EPROTO);
			} else if ((errs & ERR_DAT_CRC) == ERR_DAT_CRC) {
				sdhost_xfer_done(ss, SDA_ECRC7);
			} else {
				sdhost_xfer_done(ss, SDA_ETIME);
			}

		} else if (errs & ERR_ACMD12) {
			/*
			 * Generally, this is bad news.  we need a full
			 * reset to recover properly.
			 */
			sdhost_xfer_done(ss, SDA_ECMD12);
		}

		/*
		 * This asynchronous error leaves the slot more or less
		 * useless.  Report it to the framework.
		 */
		if (errs & ERR_CURRENT) {
			sda_host_fault(ss->ss_host, ss->ss_num,
			    SDA_FAULT_CURRENT);
		}
	}

	mutex_exit(&ss->ss_lock);

	return (DDI_INTR_CLAIMED);
}
891 
892 /*ARGSUSED1*/
893 uint_t
894 sdhost_intr(caddr_t arg1, caddr_t arg2)
895 {
896 	sdhost_t	*shp = (void *)arg1;
897 	int		rv = DDI_INTR_UNCLAIMED;
898 	int		num;
899 
900 	/* interrupt for each of the slots present in the system */
901 	for (num = 0; num < shp->sh_numslots; num++) {
902 		if (sdhost_slot_intr(&shp->sh_slots[num]) ==
903 		    DDI_INTR_CLAIMED) {
904 			rv = DDI_INTR_CLAIMED;
905 		}
906 	}
907 	return (rv);
908 }
909 
910 int
911 sdhost_init_slot(dev_info_t *dip, sdhost_t *shp, int num, int bar)
912 {
913 	sdslot_t	*ss;
914 	uint32_t	capab;
915 	uint32_t	clk;
916 
917 	/*
918 	 * Register the private state.
919 	 */
920 	ss = &shp->sh_slots[num];
921 	ss->ss_host = shp->sh_host;
922 	ss->ss_num = num;
923 	sda_host_set_private(shp->sh_host, num, ss);
924 
925 	/*
926 	 * Initialize core data structure, locks, etc.
927 	 */
928 	mutex_init(&ss->ss_lock, NULL, MUTEX_DRIVER,
929 	    DDI_INTR_PRI(shp->sh_ipri));
930 
931 	if (ddi_regs_map_setup(dip, bar, &ss->ss_regva, 0, 0, &sdhost_regattr,
932 	    &ss->ss_acch) != DDI_SUCCESS) {
933 		return (DDI_FAILURE);
934 	}
935 
936 	/* reset before reading capabilities */
937 	if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK)
938 		return (DDI_FAILURE);
939 
940 	capab = GET64(ss, REG_CAPAB) & 0xffffffffU; /* upper bits reserved */
941 	ss->ss_capab = capab;
942 
943 	/* host voltages in OCR format */
944 	ss->ss_ocr = 0;
945 	if (capab & CAPAB_18V)
946 		ss->ss_ocr |= OCR_18_19V;	/* 1.8V */
947 	if (capab & CAPAB_30V)
948 		ss->ss_ocr |= OCR_30_31V;
949 	if (capab & CAPAB_33V)
950 		ss->ss_ocr |= OCR_32_33V;
951 
952 	/* base clock */
953 	ss->ss_baseclk =
954 	    ((capab & CAPAB_BASE_FREQ_MASK) >> CAPAB_BASE_FREQ_SHIFT);
955 	ss->ss_baseclk *= 1000000;
956 
957 	/*
958 	 * Timeout clock.  We can calculate this using the following
959 	 * formula:
960 	 *
961 	 * (1000000 usec/1sec) * (1sec/tmoutclk) * base factor = clock time
962 	 *
963 	 * Clock time is the length of the base clock in usecs.
964 	 *
965 	 * Our base factor is 2^13, which is the shortest clock we
966 	 * can count.
967 	 *
968 	 * To simplify the math and avoid overflow, we cancel out the
969 	 * zeros for kHz or MHz.  Since we want to wait more clocks, not
970 	 * less, on error, we truncate the result rather than rounding
971 	 * up.
972 	 */
973 	clk = ((capab & CAPAB_TIMEOUT_FREQ_MASK) >> CAPAB_TIMEOUT_FREQ_SHIFT);
974 	if ((ss->ss_baseclk == 0) || (clk == 0)) {
975 		cmn_err(CE_WARN, "Unable to determine clock frequencies");
976 		return (DDI_FAILURE);
977 	}
978 
979 	if (capab & CAPAB_TIMEOUT_UNITS) {
980 		/* MHz */
981 		ss->ss_tmusecs = (1 << 13) / clk;
982 		clk *= 1000000;
983 	} else {
984 		/* kHz */
985 		ss->ss_tmusecs = (1000 * (1 << 13)) / clk;
986 		clk *= 1000;
987 	}
988 
989 	/*
990 	 * Calculation of the timeout.
991 	 *
992 	 * SDIO cards use a 1sec timeout, and SDHC cards use fixed
993 	 * 100msec for read and 250 msec for write.
994 	 *
995 	 * Legacy cards running at 375kHz have a worst case of about
996 	 * 15 seconds.  Running at 25MHz (the standard speed) it is
997 	 * about 100msec for read, and about 3.2 sec for write.
998 	 * Typical values are 1/100th that, or about 1msec for read,
999 	 * and 32 msec for write.
1000 	 *
1001 	 * No transaction at full speed should ever take more than 4
1002 	 * seconds.  (Some slow legacy cards might have trouble, but
1003 	 * we'll worry about them if they ever are seen.  Nobody wants
1004 	 * to wait 4 seconds to access a single block anyway!)
1005 	 *
1006 	 * To get to 4 seconds, we continuously double usec until we
1007 	 * get to the maximum value, or a timeout greater than 4
1008 	 * seconds.
1009 	 *
1010 	 * Note that for high-speed timeout clocks, we might not be
1011 	 * able to get to the full 4 seconds.  E.g. with a 48MHz
1012 	 * timeout clock, we can only get to about 2.8 seconds.  Its
1013 	 * possible that there could be some slow MMC cards that will
1014 	 * timeout at this clock rate, but it seems unlikely.  (The
1015 	 * device would have to be pressing the very worst times,
1016 	 * against the 100-fold "permissive" window allowed, and
1017 	 * running at only 12.5MHz.)
1018 	 *
1019 	 * XXX: this could easily be a tunable.  Someone dealing with only
1020 	 * reasonable cards could set this to just 1 second.
1021 	 */
1022 	for (ss->ss_tmoutclk = 0; ss->ss_tmoutclk < 14; ss->ss_tmoutclk++) {
1023 		if ((ss->ss_tmusecs * (1 << ss->ss_tmoutclk)) >= 4000000) {
1024 			break;
1025 		}
1026 	}
1027 
1028 	/*
1029 	 * Enable slot interrupts.
1030 	 */
1031 	sdhost_enable_interrupts(ss);
1032 
1033 	return (DDI_SUCCESS);
1034 }
1035 
/*
 * Tear down one slot: reset the hardware, release the register
 * mapping, and destroy the lock.  Safe to call for a slot that was
 * never initialized -- ss_acch == NULL indicates the registers were
 * never mapped and nothing needs to be undone.
 */
void
sdhost_uninit_slot(sdhost_t *shp, int num)
{
	sdslot_t	*ss;

	ss = &shp->sh_slots[num];
	if (ss->ss_acch == NULL)
		return;

	(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);

	ddi_regs_map_free(&ss->ss_acch);
	mutex_destroy(&ss->ss_lock);
}
1050 
/*
 * Copy the four 32-bit response registers into the command's
 * response array, fixing up the word layout for R2 (136-bit)
 * responses.
 */
void
sdhost_get_response(sdslot_t *ss, sda_cmd_t *cmdp)
{
	uint32_t	*resp = cmdp->sc_response;
	int		i;

	resp[0] = GET32(ss, REG_RESP1);
	resp[1] = GET32(ss, REG_RESP2);
	resp[2] = GET32(ss, REG_RESP3);
	resp[3] = GET32(ss, REG_RESP4);

	/*
	 * Response 2 is goofy because the host drops the low
	 * order CRC bits.  This makes it a bit awkward, so we
	 * have to shift the bits to make it work out right.
	 *
	 * Note that the framework expects the 32 bit
	 * words to be ordered in LE fashion.  (The
	 * bits within the words are in native order).
	 */
	if (cmdp->sc_rtype == R2) {
		for (i = 3; i > 0; i--) {
			resp[i] <<= 8;
			resp[i] |= (resp[i - 1] >> 24);
		}
		resp[0] <<= 8;
	}
}
1079 
/*
 * Busy-wait (up to ~3 msec, polling every 5 usec) for the command in
 * flight to complete.  On completion the response registers are
 * copied into cmdp.  Returns SDA_EOK on success, SDA_ETIME on
 * command timeout, SDA_ECRC7 on CRC failure, or SDA_EPROTO for other
 * command errors.
 */
sda_err_t
sdhost_wait_cmd(sdslot_t *ss, sda_cmd_t *cmdp)
{
	int		i;
	uint16_t	errs;

	/*
	 * Worst case for 100kHz timeout is 2msec (200 clocks), we add
	 * a tiny bit for safety.  (Generally timeout will be far, far
	 * less than that.)
	 *
	 * Note that at more typical 12MHz (and normally it will be
	 * even faster than that!) that the device timeout is only
	 * 16.67 usec.  We could be smarter and reduce the delay time,
	 * but that would require putting more intelligence into the
	 * code, and we don't expect CMD timeout to normally occur
	 * except during initialization.  (At which time we need the
	 * full timeout anyway.)
	 *
	 * Checking the ERR_STAT will normally cause the timeout to
	 * terminate to finish early if the device is healthy, anyway.
	 */

	for (i = 3000; i > 0; i -= 5) {
		if (GET16(ss, REG_INT_STAT) & INT_CMD) {

			PUT16(ss, REG_INT_STAT, INT_CMD);

			/* command completed */
			sdhost_get_response(ss, cmdp);
			return (SDA_EOK);
		}

		if ((errs = (GET16(ss, REG_ERR_STAT) & ERR_CMD)) != 0) {
			PUT16(ss, REG_ERR_STAT, errs);

			/* command timeout isn't a host failure */
			if ((errs & ERR_CMD_TMO) == ERR_CMD_TMO) {
				return (SDA_ETIME);
			}

			if ((errs & ERR_CMD_CRC) == ERR_CMD_CRC) {
				return (SDA_ECRC7);
			} else {
				return (SDA_EPROTO);
			}
		}

		drv_usecwait(5);
	}

	return (SDA_ETIME);
}
1133 
1134 sda_err_t
1135 sdhost_poll(void *arg)
1136 {
1137 	sdslot_t	*ss = arg;
1138 
1139 	(void) sdhost_slot_intr(ss);
1140 	return (SDA_EOK);
1141 }
1142 
/*
 * Issue a single command on the slot, optionally setting up an
 * accompanying data transfer, and busy-wait for the command phase to
 * complete (the data phase completes later, via interrupt or poll).
 *
 * Returns SDA_EOK on success, SDA_EINVAL for an unknown response type
 * or a bad block size, SDA_ESUSPENDED if the slot is suspended, or
 * the result of sdhost_wait_cmd().
 *
 * NOTE: the order of the register writes below matters; the transfer
 * setup registers are programmed before the command register write,
 * which is what kicks off execution.
 */
sda_err_t
sdhost_cmd(void *arg, sda_cmd_t *cmdp)
{
	sdslot_t	*ss = arg;
	uint16_t	command;
	uint16_t	mode;
	sda_err_t	rv;

	/*
	 * Command register:
	 * bit 13-8	= command index
	 * bit 7-6	= command type (always zero for us!)
	 * bit 5	= data present select
	 * bit 4	= command index check (always on!)
	 * bit 3	= command CRC check enable
	 * bit 2	= reserved
	 * bit 1-0	= response type
	 */

	command = ((uint16_t)cmdp->sc_index << 8);
	command |= COMMAND_TYPE_NORM |
	    COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN;

	/*
	 * Select the response length, and strip the index/CRC checks
	 * for response types that do not carry those fields.
	 */
	switch (cmdp->sc_rtype) {
	case R0:
		command |= COMMAND_RESP_NONE;
		break;
	case R1:
	case R5:
	case R6:
	case R7:
		command |= COMMAND_RESP_48;
		break;
	case R1b:
	case R5b:
		command |= COMMAND_RESP_48_BUSY;
		break;
	case R2:
		/* 136-bit response; R2 has no command index, CRC is dropped */
		command |= COMMAND_RESP_136;
		command &= ~(COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN);
		break;
	case R3:
	case R4:
		/* R3/R4 carry neither a CRC nor a command index */
		command |= COMMAND_RESP_48;
		command &= ~COMMAND_CRC_CHECK_EN;
		command &= ~COMMAND_INDEX_CHECK_EN;
		break;
	default:
		return (SDA_EINVAL);
	}

	mutex_enter(&ss->ss_lock);
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	/* nonzero block count means this command carries a data phase */
	if (cmdp->sc_nblks != 0) {
		uint16_t	blksz;
		uint16_t	nblks;

		blksz = cmdp->sc_blksz;
		nblks = cmdp->sc_nblks;

		/*
		 * Ensure that we have good data.
		 */
		if ((blksz < 1) || (blksz > 2048)) {
			mutex_exit(&ss->ss_lock);
			return (SDA_EINVAL);
		}
		command |= COMMAND_DATA_PRESENT;

		ss->ss_blksz = blksz;

		/*
		 * Only SDMA for now.  We can investigate ADMA2 later.
		 * (Right now we don't have ADMA2 capable hardware.)
		 */
		if (((ss->ss_capab & CAPAB_SDMA) != 0) &&
		    (cmdp->sc_ndmac != 0)) {
			ddi_dma_cookie_t	*dmacs = cmdp->sc_dmacs;

			ASSERT(dmacs != NULL);

			/* DMA transfer: no PIO buffer state to track */
			ss->ss_kvaddr = NULL;
			ss->ss_resid = 0;
			ss->ss_dmacs = dmacs;
			/* first cookie programmed now; the rest remain */
			ss->ss_ndmac = cmdp->sc_ndmac - 1;

			PUT32(ss, REG_SDMA_ADDR, dmacs->dmac_address);
			mode = XFR_MODE_DMA_EN;
			PUT16(ss, REG_BLKSZ, blksz);

		} else {
			/* PIO transfer: track kernel buffer and residual */
			ss->ss_kvaddr = (void *)cmdp->sc_kvaddr;
			ss->ss_resid = nblks;
			ss->ss_dmacs = NULL;
			ss->ss_ndmac = 0;
			mode = 0;
			PUT16(ss, REG_BLKSZ, blksz);
		}

		if (nblks > 1) {
			mode |= XFR_MODE_MULTI | XFR_MODE_COUNT;
			/* let the controller issue CMD12 itself if requested */
			if (cmdp->sc_flags & SDA_CMDF_AUTO_CMD12)
				mode |= XFR_MODE_AUTO_CMD12;
		}
		if ((cmdp->sc_flags & SDA_CMDF_READ) != 0) {
			mode |= XFR_MODE_READ;
		}

		/* saved so the data-phase code can consult the mode bits */
		ss->ss_mode = mode;

		PUT8(ss, REG_TIMEOUT_CONTROL, ss->ss_tmoutclk);
		PUT16(ss, REG_BLOCK_COUNT, nblks);
		PUT16(ss, REG_XFR_MODE, mode);
	}

	/* the command register write starts execution */
	PUT32(ss, REG_ARGUMENT, cmdp->sc_argument);
	PUT16(ss, REG_COMMAND, command);

	rv = sdhost_wait_cmd(ss, cmdp);

	mutex_exit(&ss->ss_lock);

	return (rv);
}
1271 
1272 sda_err_t
1273 sdhost_getprop(void *arg, sda_prop_t prop, uint32_t *val)
1274 {
1275 	sdslot_t	*ss = arg;
1276 	sda_err_t	rv = 0;
1277 
1278 	mutex_enter(&ss->ss_lock);
1279 
1280 	if (ss->ss_suspended) {
1281 		mutex_exit(&ss->ss_lock);
1282 		return (SDA_ESUSPENDED);
1283 	}
1284 
1285 	switch (prop) {
1286 	case SDA_PROP_INSERTED:
1287 		if (CHECK_STATE(ss, CARD_INSERTED)) {
1288 			*val = B_TRUE;
1289 		} else {
1290 			*val = B_FALSE;
1291 		}
1292 		break;
1293 
1294 	case SDA_PROP_WPROTECT:
1295 		if (CHECK_STATE(ss, WRITE_ENABLE)) {
1296 			*val = B_FALSE;
1297 		} else {
1298 			*val = B_TRUE;
1299 		}
1300 		break;
1301 
1302 	case SDA_PROP_OCR:
1303 		*val = ss->ss_ocr;
1304 		break;
1305 
1306 	case SDA_PROP_CLOCK:
1307 		*val = ss->ss_cardclk;
1308 		break;
1309 
1310 	case SDA_PROP_CAP_HISPEED:
1311 		if ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0) {
1312 			*val = B_TRUE;
1313 		} else {
1314 			*val = B_FALSE;
1315 		}
1316 		break;
1317 
1318 	case SDA_PROP_CAP_4BITS:
1319 		*val = B_TRUE;
1320 		break;
1321 
1322 	case SDA_PROP_CAP_NOPIO:
1323 		if ((ss->ss_capab & CAPAB_SDMA) != 0) {
1324 			*val = B_TRUE;
1325 		} else {
1326 			*val = B_FALSE;
1327 		}
1328 		break;
1329 
1330 	case SDA_PROP_CAP_INTR:
1331 	case SDA_PROP_CAP_8BITS:
1332 		*val = B_FALSE;
1333 		break;
1334 
1335 	default:
1336 		rv = SDA_ENOTSUP;
1337 		break;
1338 	}
1339 	mutex_exit(&ss->ss_lock);
1340 
1341 	return (rv);
1342 }
1343 
1344 sda_err_t
1345 sdhost_setprop(void *arg, sda_prop_t prop, uint32_t val)
1346 {
1347 	sdslot_t	*ss = arg;
1348 	sda_err_t	rv = SDA_EOK;
1349 
1350 	mutex_enter(&ss->ss_lock);
1351 
1352 	if (ss->ss_suspended) {
1353 		mutex_exit(&ss->ss_lock);
1354 		return (SDA_ESUSPENDED);
1355 	}
1356 
1357 	switch (prop) {
1358 	case SDA_PROP_LED:
1359 		if (val) {
1360 			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
1361 		} else {
1362 			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
1363 		}
1364 		break;
1365 
1366 	case SDA_PROP_CLOCK:
1367 		rv = sdhost_set_clock(arg, val);
1368 		break;
1369 
1370 	case SDA_PROP_BUSWIDTH:
1371 		switch (val) {
1372 		case 1:
1373 			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
1374 			break;
1375 		case 4:
1376 			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
1377 			break;
1378 		default:
1379 			rv = SDA_EINVAL;
1380 		}
1381 		break;
1382 
1383 	case SDA_PROP_OCR:
1384 		val &= ss->ss_ocr;
1385 
1386 		if (val & OCR_17_18V) {
1387 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V);
1388 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V |
1389 			    POWER_CONTROL_BUS_POWER);
1390 		} else if (val & OCR_29_30V) {
1391 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V);
1392 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V |
1393 			    POWER_CONTROL_BUS_POWER);
1394 		} else if (val & OCR_32_33V) {
1395 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V);
1396 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V |
1397 			    POWER_CONTROL_BUS_POWER);
1398 		} else if (val == 0) {
1399 			/* turn off power */
1400 			PUT8(ss, REG_POWER_CONTROL, 0);
1401 		} else {
1402 			rv = SDA_EINVAL;
1403 		}
1404 		break;
1405 
1406 	case SDA_PROP_HISPEED:
1407 		if (val) {
1408 			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
1409 		} else {
1410 			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
1411 		}
1412 		/* give clocks time to settle */
1413 		drv_usecwait(10);
1414 		break;
1415 
1416 	default:
1417 		rv = SDA_ENOTSUP;
1418 		break;
1419 	}
1420 
1421 	/*
1422 	 * Apparently some controllers (ENE) have issues with changing
1423 	 * certain parameters (bus width seems to be one), requiring
1424 	 * a reset of the DAT and CMD lines.
1425 	 */
1426 	if (rv == SDA_EOK) {
1427 		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
1428 		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
1429 	}
1430 	mutex_exit(&ss->ss_lock);
1431 	return (rv);
1432 }
1433 
1434 sda_err_t
1435 sdhost_reset(void *arg)
1436 {
1437 	sdslot_t	*ss = arg;
1438 
1439 	mutex_enter(&ss->ss_lock);
1440 	if (!ss->ss_suspended) {
1441 		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1442 			mutex_exit(&ss->ss_lock);
1443 			return (SDA_ETIME);
1444 		}
1445 		sdhost_enable_interrupts(ss);
1446 	}
1447 	mutex_exit(&ss->ss_lock);
1448 	return (SDA_EOK);
1449 }
1450 
1451 sda_err_t
1452 sdhost_halt(void *arg)
1453 {
1454 	sdslot_t	*ss = arg;
1455 
1456 	mutex_enter(&ss->ss_lock);
1457 	if (!ss->ss_suspended) {
1458 		sdhost_disable_interrupts(ss);
1459 		/* this has the side effect of removing power from the card */
1460 		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1461 			mutex_exit(&ss->ss_lock);
1462 			return (SDA_ETIME);
1463 		}
1464 	}
1465 	mutex_exit(&ss->ss_lock);
1466 	return (SDA_EOK);
1467 }
1468