xref: /illumos-gate/usr/src/uts/sun/io/dada/targets/dad.c (revision 033fe550)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 
27 /*
28  * Direct Attached  disk driver for SPARC machines.
29  */
30 
31 /*
32  * Includes, Declarations and Local Data
33  */
34 #include <sys/dada/dada.h>
35 #include <sys/dkbad.h>
36 #include <sys/dklabel.h>
37 #include <sys/dkio.h>
38 #include <sys/cdio.h>
39 #include <sys/vtoc.h>
40 #include <sys/dada/targets/daddef.h>
41 #include <sys/dada/targets/dadpriv.h>
42 #include <sys/file.h>
43 #include <sys/stat.h>
44 #include <sys/kstat.h>
45 #include <sys/vtrace.h>
46 #include <sys/aio_req.h>
47 #include <sys/note.h>
48 #include <sys/cmlb.h>
49 
50 /*
51  * Global Error Levels for Error Reporting
52  */
53 int dcd_error_level	= DCD_ERR_RETRYABLE;
54 /*
55  * Local Static Data
56  */
57 
58 static int dcd_io_time		= DCD_IO_TIME;
59 static int dcd_retry_count	= DCD_RETRY_COUNT;
60 #ifndef lint
61 static int dcd_report_pfa = 1;
62 #endif
63 static int dcd_rot_delay = 4;
64 static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;
65 
66 /*
67  * Local Function Prototypes
68  */
69 
70 static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
71 static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
72 static int dcdstrategy(struct buf *bp);
73 static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
74 static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
75 static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
76 static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
77 static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
78     char *, caddr_t, int *);
79 static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
80 static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
81 
82 
83 static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
84 static int dcd_doattach(dev_info_t *devi, int (*f)());
85 static int dcd_validate_geometry(struct dcd_disk *un);
86 static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
87 static ddi_devid_t  dcd_create_devid(struct dcd_disk *un);
88 static int dcd_make_devid_from_serial(struct dcd_disk *un);
89 static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
90 static int dcd_read_deviceid(struct dcd_disk *un);
91 static int dcd_write_deviceid(struct dcd_disk *un);
92 static int dcd_poll(struct dcd_pkt *pkt);
93 static char *dcd_rname(int reason);
94 static void dcd_flush_cache(struct dcd_disk *un);
95 
96 static int dcd_compute_dk_capacity(struct dcd_device *devp,
97     diskaddr_t *capacity);
98 static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
99     diskaddr_t start_block, size_t reqlength, uchar_t cmd);
100 
101 static void dcdmin(struct buf *bp);
102 
103 static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
104     enum uio_seg, enum uio_seg);
105 
106 static void dcdstart(struct dcd_disk *un);
107 static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
108 static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
109 static void dcdudcdmin(struct buf *bp);
110 
111 static int dcdrunout(caddr_t);
112 static int dcd_check_wp(dev_t dev);
113 static int dcd_unit_ready(dev_t dev);
114 static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
115     struct dcd_disk *un);
116 static void dcdintr(struct dcd_pkt *pkt);
117 static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
118 static void dcd_offline(struct dcd_disk *un, int bechatty);
119 static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
120 static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
121 static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
122 static int dcdflushdone(struct buf *bp);
123 
124 /* Function prototypes for cmlb */
125 
126 static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
127     diskaddr_t start_block, size_t reqlength, void *tg_cookie);
128 
129 static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
130 static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
131     void *tg_cookie);
132 
133 
134 static cmlb_tg_ops_t dcd_lb_ops = {
135 	TG_DK_OPS_VERSION_1,
136 	dcd_lb_rdwr,
137 	dcd_lb_getinfo
138 };
139 
140 /*
141  * Error and Logging Functions
142  */
143 #ifndef lint
144 static void clean_print(dev_info_t *dev, char *label, uint_t level,
145     char *title, char *data, int len);
146 static void dcdrestart(void *arg);
147 #endif /* lint */
148 
149 static int dcd_check_error(struct dcd_disk *un, struct buf *bp);
150 
151 /*
152  * Error statistics create/update functions
153  */
154 static int dcd_create_errstats(struct dcd_disk *, int);
155 
156 
157 
158 /*PRINTFLIKE4*/
159 extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
160     __KPRINTFLIKE(4);
161 extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
162     uchar_t, uint32_t, uchar_t, uchar_t);
163 
164 
165 /*
166  * Configuration Routines
167  */
168 static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
169     void **result);
170 static int dcdprobe(dev_info_t *devi);
171 static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
172 static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
173 static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
174 static int dcd_dr_detach(dev_info_t *devi);
175 static int dcdpower(dev_info_t *devi, int component, int level);
176 
177 static void *dcd_state;
178 static int dcd_max_instance;
179 static char *dcd_label = "dad";
180 
181 static char *diskokay = "disk okay\n";
182 
183 #if DEBUG || lint
184 #define	DCDDEBUG
185 #endif
186 
187 int dcd_test_flag = 0;
188 /*
189  * Debugging macros
190  */
191 #ifdef	DCDDEBUG
192 static int dcddebug = 0;
193 #define	DEBUGGING	(dcddebug > 1)
194 #define	DAD_DEBUG	if (dcddebug == 1) dcd_log
195 #define	DAD_DEBUG2	if (dcddebug > 1) dcd_log
196 #else	/* DCDDEBUG */
197 #define	dcddebug		(0)
198 #define	DEBUGGING	(0)
199 #define	DAD_DEBUG	if (0) dcd_log
200 #define	DAD_DEBUG2	if (0) dcd_log
201 #endif
202 
203 /*
204  * we use pkt_private area for storing bp and retry_count
205  * XXX: Really is this usefull.
206  */
207 struct dcd_pkt_private {
208 	struct buf	*dcdpp_bp;
209 	short		 dcdpp_retry_count;
210 	short		 dcdpp_victim_retry_count;
211 };
212 
213 
214 _NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))
215 
216 #define	PP_LEN	(sizeof (struct dcd_pkt_private))
217 
218 #define	PKT_SET_BP(pkt, bp)	\
219 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
220 #define	PKT_GET_BP(pkt) \
221 	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)
222 
223 
224 #define	PKT_SET_RETRY_CNT(pkt, n) \
225 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n
226 
227 #define	PKT_GET_RETRY_CNT(pkt) \
228 	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)
229 
230 #define	PKT_INCR_RETRY_CNT(pkt, n) \
231 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n
232 
233 #define	PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
234 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
235 			= n
236 
237 #define	PKT_GET_VICTIM_RETRY_CNT(pkt) \
238 	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
239 #define	PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
240 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
241 			+= n
242 
243 #define	DISK_NOT_READY_RETRY_COUNT	(dcd_retry_count / 2)
244 
245 
246 /*
247  * Urk!
248  */
249 #define	SET_BP_ERROR(bp, err)	\
250 	bioerror(bp, err);
251 
252 #define	IOSP			KSTAT_IO_PTR(un->un_stats)
253 #define	IO_PARTITION_STATS	un->un_pstats[DCDPART(bp->b_edev)]
254 #define	IOSP_PARTITION		KSTAT_IO_PTR(IO_PARTITION_STATS)
255 
256 #define	DCD_DO_KSTATS(un, kstat_function, bp) \
257 	ASSERT(mutex_owned(DCD_MUTEX)); \
258 	if (bp != un->un_sbufp) { \
259 		if (un->un_stats) { \
260 			kstat_function(IOSP); \
261 		} \
262 		if (IO_PARTITION_STATS) { \
263 			kstat_function(IOSP_PARTITION); \
264 		} \
265 	}
266 
267 #define	DCD_DO_ERRSTATS(un, x) \
268 	if (un->un_errstats) { \
269 		struct dcd_errstats *dtp; \
270 		dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
271 		dtp->x.value.ui32++; \
272 	}
273 
274 #define	GET_SOFT_STATE(dev)						\
275 	struct dcd_disk *un;					\
276 	int instance, part;					\
277 	minor_t minor = getminor(dev);				\
278 									\
279 	part = minor & DCDPART_MASK;					\
280 	instance = minor >> DCDUNIT_SHIFT;				\
281 	if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)	\
282 		return (ENXIO);
283 
284 #define	LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
285 		(((blkno) & ((1 << (blknoshift)) - 1)) == 0)
286 
287 /*
288  * After the following number of sectors, the cylinder number spills over
289  * 0xFFFF if sectors = 63 and heads = 16.
290  */
291 #define	NUM_SECTORS_32G	0x3EFFC10
292 
293 /*
294  * Configuration Data
295  */
296 
297 /*
298  * Device driver ops vector
299  */
300 
301 static struct cb_ops dcd_cb_ops = {
302 	dcdopen,		/* open */
303 	dcdclose,		/* close */
304 	dcdstrategy,		/* strategy */
305 	nodev,			/* print */
306 	dcddump,		/* dump */
307 	dcdread,		/* read */
308 	dcdwrite,		/* write */
309 	dcdioctl,		/* ioctl */
310 	nodev,			/* devmap */
311 	nodev,			/* mmap */
312 	nodev,			/* segmap */
313 	nochpoll,		/* poll */
314 	dcd_prop_op,		/* cb_prop_op */
315 	0,			/* streamtab  */
316 	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
317 	CB_REV,			/* cb_rev */
318 	dcdaread,		/* async I/O read entry point */
319 	dcdawrite		/* async I/O write entry point */
320 };
321 
322 static struct dev_ops dcd_ops = {
323 	DEVO_REV,		/* devo_rev, */
324 	0,			/* refcnt  */
325 	dcdinfo,		/* info */
326 	nulldev,		/* identify */
327 	dcdprobe,		/* probe */
328 	dcdattach,		/* attach */
329 	dcddetach,		/* detach */
330 	dcdreset,		/* reset */
331 	&dcd_cb_ops,		/* driver operations */
332 	(struct bus_ops *)0,	/* bus operations */
333 	dcdpower,		/* power */
334 	ddi_quiesce_not_supported,	/* devo_quiesce */
335 };
336 
337 
338 /*
339  * This is the loadable module wrapper.
340  */
341 #include <sys/modctl.h>
342 
343 static struct modldrv modldrv = {
344 	&mod_driverops,		/* Type of module. This one is a driver */
345 	"DAD Disk Driver",	/* Name of the module. */
346 	&dcd_ops,	/* driver ops */
347 };
348 
349 
350 
351 static struct modlinkage modlinkage = {
352 	MODREV_1, &modldrv, NULL
353 };
354 
355 /*
356  * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
357  * attach situations
358  */
359 static kmutex_t dcd_attach_mutex;
360 
361 int
_init(void)362 _init(void)
363 {
364 	int e;
365 
366 	if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
367 	    DCD_MAXUNIT)) != 0)
368 		return (e);
369 
370 	mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
371 	e = mod_install(&modlinkage);
372 	if (e != 0) {
373 		mutex_destroy(&dcd_attach_mutex);
374 		ddi_soft_state_fini(&dcd_state);
375 		return (e);
376 	}
377 
378 	return (e);
379 }
380 
381 int
_fini(void)382 _fini(void)
383 {
384 	int e;
385 
386 	if ((e = mod_remove(&modlinkage)) != 0)
387 		return (e);
388 
389 	ddi_soft_state_fini(&dcd_state);
390 	mutex_destroy(&dcd_attach_mutex);
391 
392 	return (e);
393 }
394 
395 int
_info(struct modinfo * modinfop)396 _info(struct modinfo *modinfop)
397 {
398 
399 	return (mod_info(&modlinkage, modinfop));
400 }
401 
402 static int
dcdprobe(dev_info_t * devi)403 dcdprobe(dev_info_t *devi)
404 {
405 	struct dcd_device *devp;
406 	int rval = DDI_PROBE_PARTIAL;
407 	int instance;
408 
409 	devp = ddi_get_driver_private(devi);
410 	instance = ddi_get_instance(devi);
411 
412 	/*
413 	 * Keep a count of how many disks (ie. highest instance no) we have
414 	 * XXX currently not used but maybe useful later again
415 	 */
416 	mutex_enter(&dcd_attach_mutex);
417 	if (instance > dcd_max_instance)
418 		dcd_max_instance = instance;
419 	mutex_exit(&dcd_attach_mutex);
420 
421 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "dcdprobe:\n");
422 
423 	if (ddi_get_soft_state(dcd_state, instance) != NULL)
424 		return (DDI_PROBE_PARTIAL);
425 
426 	/*
427 	 * Turn around and call utility probe routine
428 	 * to see whether we actually have a disk at
429 	 */
430 
431 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
432 	    "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));
433 
434 	switch (dcd_probe(devp, NULL_FUNC)) {
435 	default:
436 	case DCDPROBE_NORESP:
437 	case DCDPROBE_NONCCS:
438 	case DCDPROBE_NOMEM:
439 	case DCDPROBE_FAILURE:
440 	case DCDPROBE_BUSY:
441 		break;
442 
443 	case DCDPROBE_EXISTS:
444 		/*
445 		 * Check whether it is a ATA device and then
446 		 * return  SUCCESS.
447 		 */
448 		DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
449 		    "config %x\n", devp->dcd_ident->dcd_config);
450 		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
451 			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
452 				rval = DDI_PROBE_SUCCESS;
453 			} else
454 				rval = DDI_PROBE_FAILURE;
455 		} else {
456 			rval = DDI_PROBE_FAILURE;
457 		}
458 		break;
459 	}
460 	dcd_unprobe(devp);
461 
462 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
463 	    "dcdprobe returns %x\n", rval);
464 
465 	return (rval);
466 }
467 
468 
469 /*ARGSUSED*/
470 static int
dcdattach(dev_info_t * devi,ddi_attach_cmd_t cmd)471 dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
472 {
473 	int instance, rval;
474 	struct dcd_device *devp;
475 	struct dcd_disk *un;
476 	struct diskhd *dp;
477 	char	*pm_comp[] =
478 	    { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };
479 
480 	/* CONSTCOND */
481 	ASSERT(NO_COMPETING_THREADS);
482 
483 
484 	devp = ddi_get_driver_private(devi);
485 	instance = ddi_get_instance(devi);
486 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");
487 
488 	switch (cmd) {
489 	case DDI_ATTACH:
490 		break;
491 
492 	case DDI_RESUME:
493 		if (!(un = ddi_get_soft_state(dcd_state, instance)))
494 			return (DDI_FAILURE);
495 		mutex_enter(DCD_MUTEX);
496 		Restore_state(un);
497 		/*
498 		 * Restore the state which was saved to give the
499 		 * the right state in un_last_state
500 		 */
501 		un->un_last_state = un->un_save_state;
502 		un->un_throttle = 2;
503 		cv_broadcast(&un->un_suspend_cv);
504 		/*
505 		 * Raise the power level of the device to active.
506 		 */
507 		mutex_exit(DCD_MUTEX);
508 		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
509 		mutex_enter(DCD_MUTEX);
510 
511 		/*
512 		 * start unit - if this is a low-activity device
513 		 * commands in queue will have to wait until new
514 		 * commands come in, which may take awhile.
515 		 * Also, we specifically don't check un_ncmds
516 		 * because we know that there really are no
517 		 * commands in progress after the unit was suspended
518 		 * and we could have reached the throttle level, been
519 		 * suspended, and have no new commands coming in for
520 		 * awhile.  Highly unlikely, but so is the low-
521 		 * activity disk scenario.
522 		 */
523 		dp = &un->un_utab;
524 		if (dp->b_actf && (dp->b_forw == NULL)) {
525 			dcdstart(un);
526 		}
527 
528 		mutex_exit(DCD_MUTEX);
529 		return (DDI_SUCCESS);
530 
531 	default:
532 		return (DDI_FAILURE);
533 	}
534 
535 	if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
536 		return (DDI_FAILURE);
537 	}
538 
539 	if (!(un = (struct dcd_disk *)
540 	    ddi_get_soft_state(dcd_state, instance))) {
541 		return (DDI_FAILURE);
542 	}
543 	devp->dcd_private = (ataopaque_t)un;
544 
545 	/*
546 	 * Add a zero-length attribute to tell the world we support
547 	 * kernel ioctls (for layered drivers)
548 	 */
549 	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
550 	    DDI_KERNEL_IOCTL, NULL, 0);
551 
552 	/*
553 	 * Since the dad device does not have the 'reg' property,
554 	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
555 	 * The following code is to tell cpr that this device
556 	 * does need to be suspended and resumed.
557 	 */
558 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
559 	    "pm-hardware-state", (caddr_t)"needs-suspend-resume");
560 
561 	/*
562 	 * Initialize power management bookkeeping;
563 	 * Create components - In IDE case there are 3 levels and one
564 	 * component. The levels being - active, idle, standby.
565 	 */
566 
567 	rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
568 	    devi, "pm-components", pm_comp, 4);
569 	if (rval == DDI_PROP_SUCCESS) {
570 		/*
571 		 * Ignore the return value of pm_raise_power
572 		 * Even if we check the return values and
573 		 * remove the property created above, PM
574 		 * framework will not honour the change after
575 		 * first call to pm_raise_power. Hence, the
576 		 * removal of that property does not help if
577 		 * pm_raise_power fails.
578 		 */
579 		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
580 	}
581 
582 	ddi_report_dev(devi);
583 
584 	cmlb_alloc_handle(&un->un_dklbhandle);
585 
586 	if (cmlb_attach(devi,
587 	    &dcd_lb_ops,
588 	    0,
589 	    B_FALSE,
590 	    B_FALSE,
591 	    DDI_NT_BLOCK_CHAN,
592 	    CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
593 	    un->un_dklbhandle,
594 	    0) != 0) {
595 		cmlb_free_handle(&un->un_dklbhandle);
596 		dcd_free_softstate(un, devi);
597 		return (DDI_FAILURE);
598 	}
599 
600 	mutex_enter(DCD_MUTEX);
601 	(void) dcd_validate_geometry(un);
602 
603 	/* Get devid; create a devid ONLY IF could not get ID */
604 	if (dcd_get_devid(un) == NULL) {
605 		/* Create the fab'd devid */
606 		(void) dcd_create_devid(un);
607 	}
608 	mutex_exit(DCD_MUTEX);
609 
610 	return (DDI_SUCCESS);
611 }
612 
613 static void
dcd_free_softstate(struct dcd_disk * un,dev_info_t * devi)614 dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
615 {
616 	struct dcd_device		*devp;
617 	int instance = ddi_get_instance(devi);
618 
619 	devp = ddi_get_driver_private(devi);
620 
621 	if (un) {
622 		sema_destroy(&un->un_semoclose);
623 		cv_destroy(&un->un_sbuf_cv);
624 		cv_destroy(&un->un_state_cv);
625 		cv_destroy(&un->un_disk_busy_cv);
626 		cv_destroy(&un->un_suspend_cv);
627 
628 		/*
629 		 * Deallocate command packet resources.
630 		 */
631 		if (un->un_sbufp)
632 			freerbuf(un->un_sbufp);
633 		if (un->un_dp) {
634 			kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
635 		}
636 		/*
637 		 * Unregister the devid and free devid resources allocated
638 		 */
639 		ddi_devid_unregister(DCD_DEVINFO);
640 		if (un->un_devid) {
641 			ddi_devid_free(un->un_devid);
642 			un->un_devid = NULL;
643 		}
644 
645 		/*
646 		 * Delete kstats. Kstats for non CD devices are deleted
647 		 * in dcdclose.
648 		 */
649 		if (un->un_stats) {
650 			kstat_delete(un->un_stats);
651 		}
652 
653 	}
654 
655 	/*
656 	 * Cleanup scsi_device resources.
657 	 */
658 	ddi_soft_state_free(dcd_state, instance);
659 	devp->dcd_private = (ataopaque_t)0;
660 	/* unprobe scsi device */
661 	dcd_unprobe(devp);
662 
663 	/* Remove properties created during attach */
664 	ddi_prop_remove_all(devi);
665 }
666 
667 static int
dcddetach(dev_info_t * devi,ddi_detach_cmd_t cmd)668 dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
669 {
670 	int instance;
671 	struct dcd_disk *un;
672 	clock_t	wait_cmds_complete;
673 	instance = ddi_get_instance(devi);
674 
675 	if (!(un = ddi_get_soft_state(dcd_state, instance)))
676 		return (DDI_FAILURE);
677 
678 	switch (cmd) {
679 	case DDI_DETACH:
680 		return (dcd_dr_detach(devi));
681 
682 	case DDI_SUSPEND:
683 		mutex_enter(DCD_MUTEX);
684 		if (un->un_state == DCD_STATE_SUSPENDED) {
685 			mutex_exit(DCD_MUTEX);
686 			return (DDI_SUCCESS);
687 		}
688 		un->un_throttle = 0;
689 		/*
690 		 * Save the last state first
691 		 */
692 		un->un_save_state = un->un_last_state;
693 
694 		New_state(un, DCD_STATE_SUSPENDED);
695 
696 		/*
697 		 * wait till current operation completed. If we are
698 		 * in the resource wait state (with an intr outstanding)
699 		 * then we need to wait till the intr completes and
700 		 * starts the next cmd. We wait for
701 		 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
702 		 * DDI_SUSPEND.
703 		 */
704 		wait_cmds_complete = ddi_get_lbolt();
705 		wait_cmds_complete +=
706 		    DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);
707 
708 		while (un->un_ncmds) {
709 			if (cv_timedwait(&un->un_disk_busy_cv,
710 			    DCD_MUTEX, wait_cmds_complete) == -1) {
711 				/*
712 				 * commands Didn't finish in the
713 				 * specified time, fail the DDI_SUSPEND.
714 				 */
715 				DAD_DEBUG2(DCD_DEVINFO, dcd_label,
716 				    DCD_DEBUG, "dcddetach: SUSPEND "
717 				    "failed due to outstanding cmds\n");
718 				Restore_state(un);
719 				mutex_exit(DCD_MUTEX);
720 				return (DDI_FAILURE);
721 			}
722 		}
723 		mutex_exit(DCD_MUTEX);
724 		return (DDI_SUCCESS);
725 	}
726 	return (DDI_FAILURE);
727 }
728 
729 /*
730  * The reset entry point gets invoked at the system shutdown time or through
731  * CPR code at system suspend.
732  * Will be flushing the cache and expect this to be last I/O operation to the
733  * disk before system reset/power off.
734  */
735 /*ARGSUSED*/
736 static int
dcdreset(dev_info_t * dip,ddi_reset_cmd_t cmd)737 dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
738 {
739 	struct dcd_disk *un;
740 	int instance;
741 
742 	instance = ddi_get_instance(dip);
743 
744 	if (!(un = ddi_get_soft_state(dcd_state, instance)))
745 		return (DDI_FAILURE);
746 
747 	dcd_flush_cache(un);
748 
749 	return (DDI_SUCCESS);
750 }
751 
752 
753 static int
dcd_dr_detach(dev_info_t * devi)754 dcd_dr_detach(dev_info_t *devi)
755 {
756 	struct dcd_device	*devp;
757 	struct dcd_disk		*un;
758 
759 	/*
760 	 * Get scsi_device structure for this instance.
761 	 */
762 	if ((devp = ddi_get_driver_private(devi)) == NULL)
763 		return (DDI_FAILURE);
764 
765 	/*
766 	 * Get dcd_disk structure containing target 'private' information
767 	 */
768 	un = (struct dcd_disk *)devp->dcd_private;
769 
770 	/*
771 	 * Verify there are NO outstanding commands issued to this device.
772 	 * ie, un_ncmds == 0.
773 	 * It's possible to have outstanding commands through the physio
774 	 * code path, even though everything's closed.
775 	 */
776 #ifndef lint
777 	_NOTE(COMPETING_THREADS_NOW);
778 #endif
779 	mutex_enter(DCD_MUTEX);
780 	if (un->un_ncmds) {
781 		mutex_exit(DCD_MUTEX);
782 		_NOTE(NO_COMPETING_THREADS_NOW);
783 		return (DDI_FAILURE);
784 	}
785 
786 	mutex_exit(DCD_MUTEX);
787 
788 	cmlb_detach(un->un_dklbhandle, 0);
789 	cmlb_free_handle(&un->un_dklbhandle);
790 
791 
792 	/*
793 	 * Lower the power state of the device
794 	 * i.e. the minimum power consumption state - sleep.
795 	 */
796 	(void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);
797 
798 	_NOTE(NO_COMPETING_THREADS_NOW);
799 
800 	/*
801 	 * at this point there are no competing threads anymore
802 	 * release active MT locks and all device resources.
803 	 */
804 	dcd_free_softstate(un, devi);
805 
806 	return (DDI_SUCCESS);
807 }
808 
809 static int
dcdpower(dev_info_t * devi,int component,int level)810 dcdpower(dev_info_t *devi, int component, int level)
811 {
812 	struct dcd_pkt *pkt;
813 	struct dcd_disk *un;
814 	int	instance;
815 	uchar_t	cmd;
816 
817 
818 	instance = ddi_get_instance(devi);
819 
820 	if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
821 	    (DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
822 	    component != 0) {
823 		return (DDI_FAILURE);
824 	}
825 
826 	mutex_enter(DCD_MUTEX);
827 	/*
828 	 * if there are active commands for the device or device will be
829 	 * active soon. At the same time there is request to lower power
830 	 * return failure.
831 	 */
832 	if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
833 		mutex_exit(DCD_MUTEX);
834 		return (DDI_FAILURE);
835 	}
836 
837 	if ((un->un_state == DCD_STATE_OFFLINE) ||
838 	    (un->un_state == DCD_STATE_FATAL)) {
839 		mutex_exit(DCD_MUTEX);
840 		return (DDI_FAILURE);
841 	}
842 
843 	if (level == DCD_DEVICE_ACTIVE) {
844 		/*
845 		 * No need to fire any command, just set the state structure
846 		 * to indicate previous state and set the level to active
847 		 */
848 		un->un_power_level = DCD_DEVICE_ACTIVE;
849 		if (un->un_state == DCD_STATE_PM_SUSPENDED)
850 			Restore_state(un);
851 		mutex_exit(DCD_MUTEX);
852 	} else {
853 		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
854 		    NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
855 		    PKT_CONSISTENT, NULL_FUNC, NULL);
856 
857 		if (pkt == (struct dcd_pkt *)NULL) {
858 			mutex_exit(DCD_MUTEX);
859 			return (DDI_FAILURE);
860 		}
861 
862 		switch (level) {
863 		case DCD_DEVICE_IDLE:
864 			cmd = ATA_IDLE_IMMEDIATE;
865 			break;
866 
867 		case DCD_DEVICE_STANDBY:
868 			cmd = ATA_STANDBY_IMMEDIATE;
869 			break;
870 		}
871 
872 		makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
873 		mutex_exit(DCD_MUTEX);
874 		/*
875 		 * Issue the appropriate command
876 		 */
877 		if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
878 			dcd_destroy_pkt(pkt);
879 			return (DDI_FAILURE);
880 		}
881 		dcd_destroy_pkt(pkt);
882 		mutex_enter(DCD_MUTEX);
883 		if (un->un_state != DCD_STATE_PM_SUSPENDED)
884 			New_state(un, DCD_STATE_PM_SUSPENDED);
885 		un->un_power_level = level;
886 		mutex_exit(DCD_MUTEX);
887 	}
888 
889 	return (DDI_SUCCESS);
890 }
891 
892 static int
dcd_doattach(dev_info_t * devi,int (* canwait)())893 dcd_doattach(dev_info_t *devi, int (*canwait)())
894 {
895 	struct dcd_device *devp;
896 	struct dcd_disk *un = (struct dcd_disk *)0;
897 	int instance;
898 	int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
899 	int rval;
900 	char *prop_template = "target%x-dcd-options";
901 	int options;
902 	char    prop_str[32];
903 	int target;
904 	diskaddr_t capacity;
905 
906 	devp = ddi_get_driver_private(devi);
907 
908 	/*
909 	 * Call the routine scsi_probe to do some of the dirty work.
910 	 * If the INQUIRY command succeeds, the field dcd_inq in the
911 	 * device structure will be filled in. The dcd_sense structure
912 	 * will also be allocated.
913 	 */
914 
915 	switch (dcd_probe(devp, canwait)) {
916 	default:
917 		return (DDI_FAILURE);
918 
919 	case DCDPROBE_EXISTS:
920 		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
921 			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
922 				rval = DDI_SUCCESS;
923 			} else {
924 				rval = DDI_FAILURE;
925 				goto error;
926 			}
927 		} else {
928 			rval = DDI_FAILURE;
929 			goto error;
930 		}
931 	}
932 
933 
934 	instance = ddi_get_instance(devp->dcd_dev);
935 
936 	if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
937 		rval = DDI_FAILURE;
938 		goto error;
939 	}
940 
941 	un = ddi_get_soft_state(dcd_state, instance);
942 
943 	un->un_sbufp = getrbuf(km_flags);
944 	if (un->un_sbufp == (struct buf *)NULL) {
945 		rval = DDI_FAILURE;
946 		goto error;
947 	}
948 
949 
950 	un->un_dcd = devp;
951 	un->un_power_level = -1;
952 	un->un_tgattribute.media_is_writable = 1;
953 
954 	sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
955 	cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
956 	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
957 	/* Initialize power management conditional variable */
958 	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
959 	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
960 
961 	if (un->un_dp == 0) {
962 		/*
963 		 * Assume CCS drive, assume parity, but call
964 		 * it a CDROM if it is a RODIRECT device.
965 		 */
966 		un->un_dp = (struct dcd_drivetype *)
967 		    kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
968 		if (!un->un_dp) {
969 			rval = DDI_FAILURE;
970 			goto error;
971 		}
972 		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
973 			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
974 				un->un_dp->ctype = CTYPE_DISK;
975 			}
976 		} else  {
977 			rval = DDI_FAILURE;
978 			goto error;
979 		}
980 		un->un_dp->name = "CCS";
981 		un->un_dp->options = 0;
982 	}
983 
984 	/*
985 	 * Allow I/O requests at un_secsize offset in multiple of un_secsize.
986 	 */
987 	un->un_secsize = DEV_BSIZE;
988 
989 	/*
990 	 * If the device is not a removable media device, make sure that
991 	 * that the device is ready, by issuing the another identify but
992 	 * not needed. Get the capacity from identify data and store here.
993 	 */
994 	if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
995 		un->un_diskcapacity = capacity;
996 		un->un_lbasize = DEV_BSIZE;
997 	}
998 
999 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
1000 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
1001 	    devp->dcd_ident->dcd_fixcyls,
1002 	    devp->dcd_ident->dcd_heads);
1003 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
1004 	    devp->dcd_ident->dcd_sectors);
1005 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
1006 	    capacity);
1007 
1008 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1009 	    "dcdprobe: drive selected\n");
1010 
1011 	/*
1012 	 * Check for the property target<n>-dcd-options to find the option
1013 	 * set by the HBA driver for this target so that we can set the
1014 	 * Unit structure variable so that we can send commands accordingly.
1015 	 */
1016 	target = devp->dcd_address->da_target;
1017 	(void) sprintf(prop_str, prop_template, target);
1018 	options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
1019 	    prop_str, -1);
1020 	if (options < 0) {
1021 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1022 		    "No per target properties");
1023 	} else {
1024 		if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
1025 			un->un_dp->options |= DMA_SUPPORTTED;
1026 			un->un_dp->dma_mode = (options >> 3) & 0x03;
1027 			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1028 			    "mode %x\n", un->un_dp->dma_mode);
1029 		} else {
1030 			un->un_dp->options &= ~DMA_SUPPORTTED;
1031 			un->un_dp->pio_mode = options & 0x7;
1032 			if (options & DCD_BLOCK_MODE)
1033 				un->un_dp->options |= BLOCK_MODE;
1034 			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1035 			    "mode %x\n", un->un_dp->pio_mode);
1036 		}
1037 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1038 		    "options %x,", un->un_dp->options);
1039 	}
1040 
1041 	un->un_throttle = 2;
1042 	/*
1043 	 * set default max_xfer_size - This should depend on whether the
1044 	 * Block mode is supported by the device or not.
1045 	 */
1046 	un->un_max_xfer_size = MAX_ATA_XFER_SIZE;
1047 
1048 	/*
1049 	 * Set write cache enable softstate
1050 	 *
1051 	 * WCE is only supported in ATAPI-4 or higher; for
1052 	 * lower rev devices, must assume write cache is
1053 	 * enabled.
1054 	 */
1055 	mutex_enter(DCD_MUTEX);
1056 	un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
1057 	    ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
1058 	    (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
1059 	mutex_exit(DCD_MUTEX);
1060 
1061 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1062 	    "dcd_doattach returns good\n");
1063 
1064 	return (rval);
1065 
1066 error:
1067 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
1068 	dcd_free_softstate(un, devi);
1069 	return (rval);
1070 }
1071 
1072 #ifdef NOTNEEDED
1073 /*
1074  * This routine is used to set the block mode of operation by issuing the
1075  * Set Block mode ata command with the maximum block mode possible
1076  */
dcd_set_multiple(struct dcd_disk * un)1077 dcd_set_multiple(struct dcd_disk *un)
1078 {
1079 	int status;
1080 	struct udcd_cmd ucmd;
1081 	struct dcd_cmd cdb;
1082 	dev_t	dev;
1083 
1084 
1085 	/* Zero all the required structure */
1086 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
1087 
1088 	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
1089 
1090 	cdb.cmd = ATA_SET_MULTIPLE;
1091 	/*
1092 	 * Here we should pass what needs to go into sector count REGISTER.
1093 	 * Eventhough this field indicates the number of bytes to read we
1094 	 * need to specify the block factor in terms of bytes so that it
1095 	 * will be programmed by the HBA driver into the sector count register.
1096 	 */
1097 	cdb.size = un->un_lbasize * un->un_dp->block_factor;
1098 
1099 	cdb.sector_num.lba_num = 0;
1100 	cdb.address_mode = ADD_LBA_MODE;
1101 	cdb.direction = NO_DATA_XFER;
1102 
1103 	ucmd.udcd_flags = 0;
1104 	ucmd.udcd_cmd = &cdb;
1105 	ucmd.udcd_bufaddr = NULL;
1106 	ucmd.udcd_buflen = 0;
1107 	ucmd.udcd_flags |= UDCD_SILENT;
1108 
1109 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
1110 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
1111 
1112 
1113 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
1114 
1115 	return (status);
1116 }
1117 /*
1118  * The following routine is used only for setting the transfer mode
1119  * and it is not designed for transferring any other features subcommand.
1120  */
dcd_set_features(struct dcd_disk * un,uchar_t mode)1121 dcd_set_features(struct dcd_disk *un, uchar_t mode)
1122 {
1123 	int status;
1124 	struct udcd_cmd ucmd;
1125 	struct dcd_cmd cdb;
1126 	dev_t	dev;
1127 
1128 
1129 	/* Zero all the required structure */
1130 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
1131 
1132 	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
1133 
1134 	cdb.cmd = ATA_SET_FEATURES;
1135 	/*
1136 	 * Here we need to pass what needs to go into the sector count register
1137 	 * But in the case of SET FEATURES command the value taken in the
1138 	 * sector count register depends what type of subcommand is
1139 	 * passed in the features register. Since we have defined the size to
1140 	 * be the size in bytes in this context it does not indicate bytes
1141 	 * instead it indicates the mode to be programmed.
1142 	 */
1143 	cdb.size = un->un_lbasize * mode;
1144 
1145 	cdb.sector_num.lba_num = 0;
1146 	cdb.address_mode = ADD_LBA_MODE;
1147 	cdb.direction = NO_DATA_XFER;
1148 	cdb.features = ATA_FEATURE_SET_MODE;
1149 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1150 	    "size %x, features %x, cmd %x\n",
1151 	    cdb.size, cdb.features, cdb.cmd);
1152 
1153 	ucmd.udcd_flags = 0;
1154 	ucmd.udcd_cmd = &cdb;
1155 	ucmd.udcd_bufaddr = NULL;
1156 	ucmd.udcd_buflen = 0;
1157 	ucmd.udcd_flags |= UDCD_SILENT;
1158 
1159 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
1160 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
1161 
1162 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
1163 
1164 	return (status);
1165 }
1166 #endif
1167 
1168 /*
1169  * Validate the geometry for this disk, e.g.,
1170  * see whether it has a valid label.
1171  */
1172 static int
dcd_validate_geometry(struct dcd_disk * un)1173 dcd_validate_geometry(struct dcd_disk *un)
1174 {
1175 	int secsize = 0;
1176 	struct  dcd_device *devp;
1177 	int secdiv;
1178 	int rval;
1179 
1180 	ASSERT(mutex_owned(DCD_MUTEX));
1181 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1182 	    "dcd_validate_geometry: started \n");
1183 
1184 	if (un->un_lbasize < 0) {
1185 		return (DCD_BAD_LABEL);
1186 	}
1187 
1188 	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1189 		mutex_exit(DCD_MUTEX);
1190 		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE) !=
1191 		    DDI_SUCCESS) {
1192 			mutex_enter(DCD_MUTEX);
1193 			return (DCD_BAD_LABEL);
1194 		}
1195 		mutex_enter(DCD_MUTEX);
1196 	}
1197 
1198 	secsize = un->un_secsize;
1199 
1200 	/*
1201 	 * take a log base 2 of sector size (sorry)
1202 	 */
1203 	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1204 		;
1205 	un->un_secdiv = secdiv;
1206 
1207 	/*
1208 	 * Only DIRECT ACCESS devices will have Sun labels.
1209 	 * CD's supposedly have a Sun label, too
1210 	 */
1211 
1212 	devp = un->un_dcd;
1213 
1214 	if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
1215 	    (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
1216 		mutex_exit(DCD_MUTEX);
1217 		rval = cmlb_validate(un->un_dklbhandle, 0, 0);
1218 		mutex_enter(DCD_MUTEX);
1219 		if (rval == ENOMEM)
1220 			return (DCD_NO_MEM_FOR_LABEL);
1221 		else if (rval != 0)
1222 			return (DCD_BAD_LABEL);
1223 	} else {
1224 		/* it should never get here. */
1225 		return (DCD_BAD_LABEL);
1226 	}
1227 
1228 	/*
1229 	 * take a log base 2 of logical block size
1230 	 */
1231 	secsize = un->un_lbasize;
1232 	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1233 		;
1234 	un->un_lbadiv = secdiv;
1235 
1236 	/*
1237 	 * take a log base 2 of the multiple of DEV_BSIZE blocks that
1238 	 * make up one logical block
1239 	 */
1240 	secsize = un->un_lbasize >> DEV_BSHIFT;
1241 	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1242 		;
1243 	un->un_blknoshift = secdiv;
1244 	return (0);
1245 }
1246 
1247 /*
1248  * Unix Entry Points
1249  */
1250 
1251 /* ARGSUSED3 */
1252 static int
dcdopen(dev_t * dev_p,int flag,int otyp,cred_t * cred_p)1253 dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
1254 {
1255 	dev_t dev = *dev_p;
1256 	int rval = EIO;
1257 	int partmask;
1258 	int nodelay = (flag & (FNDELAY | FNONBLOCK));
1259 	int i;
1260 	char kstatname[KSTAT_STRLEN];
1261 	diskaddr_t lblocks;
1262 	char *partname;
1263 
1264 	GET_SOFT_STATE(dev);
1265 
1266 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1267 	    "Inside Open flag %x, otyp %x\n", flag, otyp);
1268 
1269 	if (otyp >= OTYPCNT) {
1270 		return (EINVAL);
1271 	}
1272 
1273 	partmask = 1 << part;
1274 
1275 	/*
1276 	 * We use a semaphore here in order to serialize
1277 	 * open and close requests on the device.
1278 	 */
1279 	sema_p(&un->un_semoclose);
1280 
1281 	mutex_enter(DCD_MUTEX);
1282 
1283 	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
1284 		rval = ENXIO;
1285 		goto done;
1286 	}
1287 
1288 	while (un->un_state == DCD_STATE_SUSPENDED) {
1289 		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
1290 	}
1291 
1292 	if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
1293 		mutex_exit(DCD_MUTEX);
1294 		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
1295 		    != DDI_SUCCESS) {
1296 			mutex_enter(DCD_MUTEX);
1297 			rval = EIO;
1298 			goto done;
1299 		}
1300 		mutex_enter(DCD_MUTEX);
1301 	}
1302 
1303 	/*
1304 	 * set make_dcd_cmd() flags and stat_size here since these
1305 	 * are unlikely to change
1306 	 */
1307 	un->un_cmd_flags = 0;
1308 
1309 	un->un_cmd_stat_size = 2;
1310 
1311 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
1312 	    (void *)un);
1313 	/*
1314 	 * check for previous exclusive open
1315 	 */
1316 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1317 	    "exclopen=%x, flag=%x, regopen=%x\n",
1318 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
1319 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1320 	    "Exclusive open flag %x, partmask %x\n",
1321 	    un->un_exclopen, partmask);
1322 
1323 	if (un->un_exclopen & (partmask)) {
1324 failed_exclusive:
1325 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1326 		    "exclusive open fails\n");
1327 		rval = EBUSY;
1328 		goto done;
1329 	}
1330 
1331 	if (flag & FEXCL) {
1332 		int i;
1333 		if (un->un_ocmap.lyropen[part]) {
1334 			goto failed_exclusive;
1335 		}
1336 		for (i = 0; i < (OTYPCNT - 1); i++) {
1337 			if (un->un_ocmap.regopen[i] & (partmask)) {
1338 				goto failed_exclusive;
1339 			}
1340 		}
1341 	}
1342 	if (flag & FWRITE) {
1343 		mutex_exit(DCD_MUTEX);
1344 		if (dcd_check_wp(dev)) {
1345 			sema_v(&un->un_semoclose);
1346 			return (EROFS);
1347 		}
1348 		mutex_enter(DCD_MUTEX);
1349 	}
1350 
1351 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1352 	    "Check Write Protect handled\n");
1353 
1354 	if (!nodelay) {
1355 		mutex_exit(DCD_MUTEX);
1356 		if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
1357 			rval = EIO;
1358 		}
1359 		(void) pm_idle_component(DCD_DEVINFO, 0);
1360 		/*
1361 		 * Fail if device is not ready or if the number of disk
1362 		 * blocks is zero or negative for non CD devices.
1363 		 */
1364 		if (rval || cmlb_partinfo(un->un_dklbhandle,
1365 		    part, &lblocks, NULL, &partname, NULL, 0) ||
1366 		    lblocks <= 0) {
1367 			rval = EIO;
1368 			mutex_enter(DCD_MUTEX);
1369 			goto done;
1370 		}
1371 		mutex_enter(DCD_MUTEX);
1372 	}
1373 
1374 	if (otyp == OTYP_LYR) {
1375 		un->un_ocmap.lyropen[part]++;
1376 	} else {
1377 		un->un_ocmap.regopen[otyp] |= partmask;
1378 	}
1379 
1380 	/*
1381 	 * set up open and exclusive open flags
1382 	 */
1383 	if (flag & FEXCL) {
1384 		un->un_exclopen |= (partmask);
1385 	}
1386 
1387 
1388 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1389 	    "open of part %d type %d\n",
1390 	    part, otyp);
1391 
1392 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1393 	    "Kstats getting updated\n");
1394 	/*
1395 	 * only create kstats for disks, CD kstats created in dcdattach
1396 	 */
1397 	_NOTE(NO_COMPETING_THREADS_NOW);
1398 	mutex_exit(DCD_MUTEX);
1399 	if (un->un_stats == (kstat_t *)0) {
1400 		un->un_stats = kstat_create("dad", instance,
1401 		    NULL, "disk", KSTAT_TYPE_IO, 1,
1402 		    KSTAT_FLAG_PERSISTENT);
1403 		if (un->un_stats) {
1404 			un->un_stats->ks_lock = DCD_MUTEX;
1405 			kstat_install(un->un_stats);
1406 		}
1407 
1408 		/*
1409 		 * set up partition statistics for each partition
1410 		 * with number of blocks > 0
1411 		 */
1412 		if (!nodelay) {
1413 			for (i = 0; i < NDKMAP; i++) {
1414 				if ((un->un_pstats[i] == (kstat_t *)0) &&
1415 				    (cmlb_partinfo(un->un_dklbhandle,
1416 				    i, &lblocks, NULL, &partname,
1417 				    NULL, 0) == 0) && lblocks > 0) {
1418 					(void) sprintf(kstatname, "dad%d,%s",
1419 					    instance, partname);
1420 					un->un_pstats[i] = kstat_create("dad",
1421 					    instance,
1422 					    kstatname,
1423 					    "partition",
1424 					    KSTAT_TYPE_IO,
1425 					    1,
1426 					    KSTAT_FLAG_PERSISTENT);
1427 					if (un->un_pstats[i]) {
1428 						un->un_pstats[i]->ks_lock =
1429 						    DCD_MUTEX;
1430 						kstat_install(un->un_pstats[i]);
1431 					}
1432 				}
1433 			}
1434 		}
1435 		/*
1436 		 * set up error kstats
1437 		 */
1438 		(void) dcd_create_errstats(un, instance);
1439 	}
1440 #ifndef lint
1441 	_NOTE(COMPETING_THREADS_NOW);
1442 #endif
1443 
1444 	sema_v(&un->un_semoclose);
1445 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
1446 	return (0);
1447 
1448 done:
1449 	mutex_exit(DCD_MUTEX);
1450 	sema_v(&un->un_semoclose);
1451 	return (rval);
1452 
1453 }
1454 
1455 /*
1456  * Test if disk is ready and has a valid geometry.
1457  */
1458 static int
dcd_ready_and_valid(dev_t dev,struct dcd_disk * un)1459 dcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
1460 {
1461 	int rval = 1;
1462 	int g_error = 0;
1463 
1464 	mutex_enter(DCD_MUTEX);
1465 	/*
1466 	 * cmds outstanding
1467 	 */
1468 	if (un->un_ncmds == 0) {
1469 		(void) dcd_unit_ready(dev);
1470 	}
1471 
1472 	/*
1473 	 * If device is not yet ready here, inform it is offline
1474 	 */
1475 	if (un->un_state == DCD_STATE_NORMAL) {
1476 		rval = dcd_unit_ready(dev);
1477 		if (rval != 0 && rval != EACCES) {
1478 			dcd_offline(un, 1);
1479 			goto done;
1480 		}
1481 	}
1482 
1483 	if (un->un_format_in_progress == 0) {
1484 		g_error = dcd_validate_geometry(un);
1485 	}
1486 
1487 	/*
1488 	 * check if geometry was valid. We don't check the validity of
1489 	 * geometry for CDROMS.
1490 	 */
1491 
1492 	if (g_error == DCD_BAD_LABEL) {
1493 		rval = 1;
1494 		goto done;
1495 	}
1496 
1497 
1498 	/*
1499 	 * the state has changed; inform the media watch routines
1500 	 */
1501 	un->un_mediastate = DKIO_INSERTED;
1502 	cv_broadcast(&un->un_state_cv);
1503 	rval = 0;
1504 
1505 done:
1506 	mutex_exit(DCD_MUTEX);
1507 	return (rval);
1508 }
1509 
1510 
1511 /*ARGSUSED*/
1512 static int
dcdclose(dev_t dev,int flag,int otyp,cred_t * cred_p)1513 dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
1514 {
1515 	uchar_t *cp;
1516 	int i;
1517 
1518 	GET_SOFT_STATE(dev);
1519 
1520 
1521 	if (otyp >= OTYPCNT)
1522 		return (ENXIO);
1523 
1524 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1525 	    "close of part %d type %d\n",
1526 	    part, otyp);
1527 	sema_p(&un->un_semoclose);
1528 
1529 	mutex_enter(DCD_MUTEX);
1530 
1531 	if (un->un_exclopen & (1<<part)) {
1532 		un->un_exclopen &= ~(1<<part);
1533 	}
1534 
1535 	if (otyp == OTYP_LYR) {
1536 		un->un_ocmap.lyropen[part] -= 1;
1537 	} else {
1538 		un->un_ocmap.regopen[otyp] &= ~(1<<part);
1539 	}
1540 
1541 	cp = &un->un_ocmap.chkd[0];
1542 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
1543 		if (*cp != (uchar_t)0) {
1544 			break;
1545 		}
1546 		cp++;
1547 	}
1548 
1549 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
1550 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
1551 		if (un->un_state == DCD_STATE_OFFLINE) {
1552 			dcd_offline(un, 1);
1553 		}
1554 
1555 		mutex_exit(DCD_MUTEX);
1556 		(void) cmlb_close(un->un_dklbhandle, 0);
1557 
1558 		_NOTE(NO_COMPETING_THREADS_NOW);
1559 		if (un->un_stats) {
1560 			kstat_delete(un->un_stats);
1561 			un->un_stats = 0;
1562 		}
1563 		for (i = 0; i < NDKMAP; i++) {
1564 			if (un->un_pstats[i]) {
1565 				kstat_delete(un->un_pstats[i]);
1566 				un->un_pstats[i] = (kstat_t *)0;
1567 			}
1568 		}
1569 
1570 		if (un->un_errstats) {
1571 			kstat_delete(un->un_errstats);
1572 			un->un_errstats = (kstat_t *)0;
1573 		}
1574 		mutex_enter(DCD_MUTEX);
1575 
1576 #ifndef lint
1577 		_NOTE(COMPETING_THREADS_NOW);
1578 #endif
1579 	}
1580 
1581 	mutex_exit(DCD_MUTEX);
1582 	sema_v(&un->un_semoclose);
1583 	return (0);
1584 }
1585 
1586 static void
dcd_offline(struct dcd_disk * un,int bechatty)1587 dcd_offline(struct dcd_disk *un, int bechatty)
1588 {
1589 	if (bechatty)
1590 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");
1591 
1592 	mutex_exit(DCD_MUTEX);
1593 	cmlb_invalidate(un->un_dklbhandle, 0);
1594 	mutex_enter(DCD_MUTEX);
1595 }
1596 
1597 /*
1598  * Given the device number return the devinfo pointer
1599  * from the scsi_device structure.
1600  */
1601 /*ARGSUSED*/
1602 static int
dcdinfo(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)1603 dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1604 {
1605 	dev_t dev;
1606 	struct dcd_disk *un;
1607 	int instance, error;
1608 
1609 
1610 	switch (infocmd) {
1611 	case DDI_INFO_DEVT2DEVINFO:
1612 		dev = (dev_t)arg;
1613 		instance = DCDUNIT(dev);
1614 		if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
1615 			return (DDI_FAILURE);
1616 		*result = (void *) DCD_DEVINFO;
1617 		error = DDI_SUCCESS;
1618 		break;
1619 	case DDI_INFO_DEVT2INSTANCE:
1620 		dev = (dev_t)arg;
1621 		instance = DCDUNIT(dev);
1622 		*result = (void *)(uintptr_t)instance;
1623 		error = DDI_SUCCESS;
1624 		break;
1625 	default:
1626 		error = DDI_FAILURE;
1627 	}
1628 	return (error);
1629 }
1630 
1631 /*
1632  * property operation routine.	return the number of blocks for the partition
1633  * in question or forward the request to the propery facilities.
1634  */
1635 static int
dcd_prop_op(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp)1636 dcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1637     char *name, caddr_t valuep, int *lengthp)
1638 {
1639 	struct dcd_disk	*un;
1640 
1641 	if ((un = ddi_get_soft_state(dcd_state, ddi_get_instance(dip))) == NULL)
1642 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1643 		    name, valuep, lengthp));
1644 
1645 	return (cmlb_prop_op(un->un_dklbhandle,
1646 	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
1647 	    DCDPART(dev), NULL));
1648 }
1649 
1650 /*
1651  * These routines perform raw i/o operations.
1652  */
1653 /*ARGSUSED*/
1654 void
dcduscsimin(struct buf * bp)1655 dcduscsimin(struct buf *bp)
1656 {
1657 
1658 }
1659 
1660 
1661 static void
dcdmin(struct buf * bp)1662 dcdmin(struct buf *bp)
1663 {
1664 	struct dcd_disk *un;
1665 	int instance;
1666 	minor_t minor = getminor(bp->b_edev);
1667 	instance = minor >> DCDUNIT_SHIFT;
1668 	un = ddi_get_soft_state(dcd_state, instance);
1669 
1670 	if (bp->b_bcount > un->un_max_xfer_size)
1671 		bp->b_bcount = un->un_max_xfer_size;
1672 }
1673 
1674 
1675 /* ARGSUSED2 */
1676 static int
dcdread(dev_t dev,struct uio * uio,cred_t * cred_p)1677 dcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
1678 {
1679 	int secmask;
1680 	GET_SOFT_STATE(dev);
1681 #ifdef lint
1682 	part = part;
1683 #endif /* lint */
1684 	secmask = un->un_secsize - 1;
1685 
1686 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1687 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1688 		    "file offset not modulo %d\n",
1689 		    un->un_secsize);
1690 		return (EINVAL);
1691 	} else if (uio->uio_iov->iov_len & (secmask)) {
1692 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1693 		    "transfer length not modulo %d\n", un->un_secsize);
1694 		return (EINVAL);
1695 	}
1696 	return (physio(dcdstrategy, (struct buf *)0, dev, B_READ, dcdmin, uio));
1697 }
1698 
1699 /* ARGSUSED2 */
1700 static int
dcdaread(dev_t dev,struct aio_req * aio,cred_t * cred_p)1701 dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1702 {
1703 	int secmask;
1704 	struct uio *uio = aio->aio_uio;
1705 	GET_SOFT_STATE(dev);
1706 #ifdef lint
1707 	part = part;
1708 #endif /* lint */
1709 	secmask = un->un_secsize - 1;
1710 
1711 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1712 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1713 		    "file offset not modulo %d\n",
1714 		    un->un_secsize);
1715 		return (EINVAL);
1716 	} else if (uio->uio_iov->iov_len & (secmask)) {
1717 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1718 		    "transfer length not modulo %d\n", un->un_secsize);
1719 		return (EINVAL);
1720 	}
1721 	return (aphysio(dcdstrategy, anocancel, dev, B_READ, dcdmin, aio));
1722 }
1723 
1724 /* ARGSUSED2 */
1725 static int
dcdwrite(dev_t dev,struct uio * uio,cred_t * cred_p)1726 dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
1727 {
1728 	int secmask;
1729 	GET_SOFT_STATE(dev);
1730 #ifdef lint
1731 	part = part;
1732 #endif /* lint */
1733 	secmask = un->un_secsize - 1;
1734 
1735 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1736 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1737 		    "file offset not modulo %d\n",
1738 		    un->un_secsize);
1739 		return (EINVAL);
1740 	} else if (uio->uio_iov->iov_len & (secmask)) {
1741 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1742 		    "transfer length not modulo %d\n", un->un_secsize);
1743 		return (EINVAL);
1744 	}
1745 	return (physio(dcdstrategy, (struct buf *)0, dev, B_WRITE, dcdmin,
1746 	    uio));
1747 }
1748 
1749 /* ARGSUSED2 */
1750 static int
dcdawrite(dev_t dev,struct aio_req * aio,cred_t * cred_p)1751 dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1752 {
1753 	int secmask;
1754 	struct uio *uio = aio->aio_uio;
1755 	GET_SOFT_STATE(dev);
1756 #ifdef lint
1757 	part = part;
1758 #endif /* lint */
1759 	secmask = un->un_secsize - 1;
1760 
1761 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1762 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1763 		    "file offset not modulo %d\n",
1764 		    un->un_secsize);
1765 		return (EINVAL);
1766 	} else if (uio->uio_iov->iov_len & (secmask)) {
1767 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1768 		    "transfer length not modulo %d\n", un->un_secsize);
1769 		return (EINVAL);
1770 	}
1771 	return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio));
1772 }
1773 
1774 /*
1775  * strategy routine
1776  */
1777 static int
dcdstrategy(struct buf * bp)1778 dcdstrategy(struct buf *bp)
1779 {
1780 	struct dcd_disk *un;
1781 	struct diskhd *dp;
1782 	int i;
1783 	minor_t minor = getminor(bp->b_edev);
1784 	diskaddr_t p_lblksrt;
1785 	diskaddr_t lblocks;
1786 	diskaddr_t bn;
1787 
1788 	if ((un = ddi_get_soft_state(dcd_state,
1789 	    minor >> DCDUNIT_SHIFT)) == NULL ||
1790 	    un->un_state == DCD_STATE_DUMPING ||
1791 	    ((un->un_state  & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
1792 		SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
1793 error:
1794 		bp->b_resid = bp->b_bcount;
1795 		biodone(bp);
1796 		return (0);
1797 	}
1798 
1799 	/*
1800 	 * If the request size (buf->b_bcount)is greater than the size
1801 	 * (un->un_max_xfer_size) supported by the target driver fail
1802 	 * the request with EINVAL error code.
1803 	 *
1804 	 * We are not supposed to receive requests exceeding
1805 	 * un->un_max_xfer_size size because the caller is expected to
1806 	 * check what is the maximum size that is supported by this
1807 	 * driver either through ioctl or dcdmin routine(which is private
1808 	 * to this driver).
1809 	 * But we have seen cases (like meta driver(md))where dcdstrategy
1810 	 * called with more than supported size and cause data corruption.
1811 	 */
1812 
1813 	if (bp->b_bcount > un->un_max_xfer_size) {
1814 		SET_BP_ERROR(bp, EINVAL);
1815 		goto error;
1816 	}
1817 
1818 	TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
1819 	    "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);
1820 
1821 	/*
1822 	 * Commands may sneak in while we released the mutex in
1823 	 * DDI_SUSPEND, we should block new commands.
1824 	 */
1825 	mutex_enter(DCD_MUTEX);
1826 	while (un->un_state == DCD_STATE_SUSPENDED) {
1827 		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
1828 	}
1829 
1830 	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1831 		mutex_exit(DCD_MUTEX);
1832 		(void) pm_idle_component(DCD_DEVINFO, 0);
1833 		if (pm_raise_power(DCD_DEVINFO, 0,
1834 		    DCD_DEVICE_ACTIVE) !=  DDI_SUCCESS) {
1835 			SET_BP_ERROR(bp, EIO);
1836 			goto error;
1837 		}
1838 		mutex_enter(DCD_MUTEX);
1839 	}
1840 	mutex_exit(DCD_MUTEX);
1841 
1842 	/*
1843 	 * Map-in the buffer in case starting address is not word aligned.
1844 	 */
1845 
1846 	if (((uintptr_t)bp->b_un.b_addr) & 0x1)
1847 		bp_mapin(bp);
1848 
1849 	bp->b_flags &= ~(B_DONE|B_ERROR);
1850 	bp->b_resid = 0;
1851 	bp->av_forw = 0;
1852 
1853 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1854 	    "bp->b_bcount %lx\n", bp->b_bcount);
1855 
1856 	if (bp != un->un_sbufp) {
1857 validated:	if (cmlb_partinfo(un->un_dklbhandle,
1858 		    minor & DCDPART_MASK,
1859 		    &lblocks,
1860 		    &p_lblksrt,
1861 		    NULL,
1862 		    NULL,
1863 		    0) == 0) {
1864 
1865 			bn = dkblock(bp);
1866 
1867 			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1868 			    "dkblock(bp) is %llu\n", bn);
1869 
1870 			i = 0;
1871 			if (bn < 0) {
1872 				i = -1;
1873 			} else if (bn >= lblocks) {
1874 				/*
1875 				 * For proper comparison, file system block
1876 				 * number has to be scaled to actual CD
1877 				 * transfer size.
1878 				 * Since all the CDROM operations
1879 				 * that have Sun Labels are in the correct
1880 				 * block size this will work for CD's.	This
1881 				 * will have to change when we have different
1882 				 * sector sizes.
1883 				 *
1884 				 * if bn == lblocks,
1885 				 * Not an error, resid == count
1886 				 */
1887 				if (bn > lblocks) {
1888 					i = -1;
1889 				} else {
1890 					i = 1;
1891 				}
1892 			} else if (bp->b_bcount & (un->un_secsize-1)) {
1893 				/*
1894 				 * This should really be:
1895 				 *
1896 				 * ... if (bp->b_bcount & (un->un_lbasize-1))
1897 				 *
1898 				 */
1899 				i = -1;
1900 			} else {
1901 				if (!bp->b_bcount) {
1902 					printf("Waring : Zero read or Write\n");
1903 					goto error;
1904 				}
1905 				/*
1906 				 * sort by absolute block number.
1907 				 */
1908 				bp->b_resid = bn;
1909 				bp->b_resid += p_lblksrt;
1910 				/*
1911 				 * zero out av_back - this will be a signal
1912 				 * to dcdstart to go and fetch the resources
1913 				 */
1914 				bp->av_back = NO_PKT_ALLOCATED;
1915 			}
1916 
1917 			/*
1918 			 * Check to see whether or not we are done
1919 			 * (with or without errors).
1920 			 */
1921 
1922 			if (i != 0) {
1923 				if (i < 0) {
1924 					bp->b_flags |= B_ERROR;
1925 				}
1926 				goto error;
1927 			}
1928 		} else {
1929 			/*
1930 			 * opened in NDELAY/NONBLOCK mode?
1931 			 * Check if disk is ready and has a valid geometry
1932 			 */
1933 			if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
1934 				goto validated;
1935 			} else {
1936 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
1937 				    "i/o to invalid geometry\n");
1938 				SET_BP_ERROR(bp, EIO);
1939 				goto error;
1940 			}
1941 		}
1942 	} else if (BP_HAS_NO_PKT(bp)) {
1943 		struct udcd_cmd *tscmdp;
1944 		struct dcd_cmd *tcmdp;
1945 		/*
1946 		 * This indicates that it is a special buffer
1947 		 * This could be a udcd-cmd and hence call bp_mapin just
1948 		 * in case that it could be a PIO command issued.
1949 		 */
1950 		tscmdp = (struct udcd_cmd *)bp->b_forw;
1951 		tcmdp = tscmdp->udcd_cmd;
1952 		if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
1953 		    (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
1954 		    (tcmdp->cmd != IDENTIFY_DMA) &&
1955 		    (tcmdp->cmd != ATA_FLUSH_CACHE)) {
1956 			bp_mapin(bp);
1957 		}
1958 	}
1959 
1960 	/*
1961 	 * We are doing it a bit non-standard. That is, the
1962 	 * head of the b_actf chain is *not* the active command-
1963 	 * it is just the head of the wait queue. The reason
1964 	 * we do this is that the head of the b_actf chain is
1965 	 * guaranteed to not be moved by disksort(), so that
1966 	 * our restart command (pointed to by
1967 	 * b_forw) and the head of the wait queue (b_actf) can
1968 	 * have resources granted without it getting lost in
1969 	 * the queue at some later point (where we would have
1970 	 * to go and look for it).
1971 	 */
1972 	mutex_enter(DCD_MUTEX);
1973 
1974 	DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
1975 
1976 	dp = &un->un_utab;
1977 
1978 	if (dp->b_actf == NULL) {
1979 		dp->b_actf = bp;
1980 		dp->b_actl = bp;
1981 	} else if ((un->un_state == DCD_STATE_SUSPENDED) &&
1982 	    bp == un->un_sbufp) {
1983 		bp->b_actf = dp->b_actf;
1984 		dp->b_actf = bp;
1985 	} else {
1986 		TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
1987 		    "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
1988 		    dp, bp, un);
1989 		disksort(dp, bp);
1990 		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
1991 		    "dcdstrategy_disksort_end");
1992 	}
1993 
1994 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1995 	    "ncmd %x , throttle %x, forw 0x%p\n",
1996 	    un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
1997 	ASSERT(un->un_ncmds >= 0);
1998 	ASSERT(un->un_throttle >= 0);
1999 	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
2000 		dcdstart(un);
2001 	} else if (BP_HAS_NO_PKT(dp->b_actf)) {
2002 		struct buf *cmd_bp;
2003 
2004 		cmd_bp = dp->b_actf;
2005 		cmd_bp->av_back = ALLOCATING_PKT;
2006 		mutex_exit(DCD_MUTEX);
2007 		/*
2008 		 * try and map this one
2009 		 */
2010 		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
2011 		    "dcdstrategy_small_window_call (begin)");
2012 
2013 		make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2014 
2015 		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
2016 		    "dcdstrategy_small_window_call (end)");
2017 
2018 		/*
2019 		 * there is a small window where the active cmd
2020 		 * completes before make_dcd_cmd returns.
2021 		 * consequently, this cmd never gets started so
2022 		 * we start it from here
2023 		 */
2024 		mutex_enter(DCD_MUTEX);
2025 		if ((un->un_ncmds < un->un_throttle) &&
2026 		    (dp->b_forw == NULL)) {
2027 			dcdstart(un);
2028 		}
2029 	}
2030 	mutex_exit(DCD_MUTEX);
2031 
2032 done:
2033 	TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
2034 	return (0);
2035 }
2036 
2037 
2038 /*
2039  * Unit start and Completion
2040  * NOTE: we assume that the caller has at least checked for:
2041  *		(un->un_ncmds < un->un_throttle)
2042  *	if not, there is no real harm done, dcd_transport() will
2043  *	return BUSY
2044  */
2045 static void
dcdstart(struct dcd_disk * un)2046 dcdstart(struct dcd_disk *un)
2047 {
2048 	int status, sort_key;
2049 	struct buf *bp;
2050 	struct diskhd *dp;
2051 	uchar_t state = un->un_last_state;
2052 
2053 	TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un);
2054 
2055 retry:
2056 	ASSERT(mutex_owned(DCD_MUTEX));
2057 
2058 	dp = &un->un_utab;
2059 	if (((bp = dp->b_actf) == NULL) || (bp->av_back == ALLOCATING_PKT) ||
2060 	    (dp->b_forw != NULL)) {
2061 		TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_WORK_END,
2062 		    "dcdstart_end (no work)");
2063 		return;
2064 	}
2065 
2066 	/*
2067 	 * remove from active queue
2068 	 */
2069 	dp->b_actf = bp->b_actf;
2070 	bp->b_actf = 0;
2071 
2072 	/*
2073 	 * increment ncmds before calling dcd_transport because dcdintr
2074 	 * may be called before we return from dcd_transport!
2075 	 */
2076 	un->un_ncmds++;
2077 
2078 	/*
2079 	 * If measuring stats, mark exit from wait queue and
2080 	 * entrance into run 'queue' if and only if we are
2081 	 * going to actually start a command.
2082 	 * Normally the bp already has a packet at this point
2083 	 */
2084 	DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
2085 
2086 	mutex_exit(DCD_MUTEX);
2087 
2088 	if (BP_HAS_NO_PKT(bp)) {
2089 		make_dcd_cmd(un, bp, dcdrunout);
2090 		if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) {
2091 			mutex_enter(DCD_MUTEX);
2092 			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2093 
2094 			bp->b_actf = dp->b_actf;
2095 			dp->b_actf = bp;
2096 			New_state(un, DCD_STATE_RWAIT);
2097 			un->un_ncmds--;
2098 			TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END,
2099 			    "dcdstart_end (No Resources)");
2100 			goto done;
2101 
2102 		} else if (bp->b_flags & B_ERROR) {
2103 			mutex_enter(DCD_MUTEX);
2104 			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2105 
2106 			un->un_ncmds--;
2107 			bp->b_resid = bp->b_bcount;
2108 			if (bp->b_error == 0) {
2109 				SET_BP_ERROR(bp, EIO);
2110 			}
2111 
2112 			/*
2113 			 * restore old state
2114 			 */
2115 			un->un_state = un->un_last_state;
2116 			un->un_last_state = state;
2117 
2118 			mutex_exit(DCD_MUTEX);
2119 
2120 			biodone(bp);
2121 			mutex_enter(DCD_MUTEX);
2122 			if (un->un_state == DCD_STATE_SUSPENDED) {
2123 				cv_broadcast(&un->un_disk_busy_cv);
2124 			}
2125 
2126 			if ((un->un_ncmds < un->un_throttle) &&
2127 			    (dp->b_forw == NULL)) {
2128 				goto retry;
2129 			} else {
2130 				goto done;
2131 			}
2132 		}
2133 	}
2134 
2135 	/*
2136 	 * Restore resid from the packet, b_resid had been the
2137 	 * disksort key.
2138 	 */
2139 	sort_key = bp->b_resid;
2140 	bp->b_resid = BP_PKT(bp)->pkt_resid;
2141 	BP_PKT(bp)->pkt_resid = 0;
2142 
2143 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2144 	    "bp->b_resid %lx, pkt_resid %lx\n",
2145 	    bp->b_resid, BP_PKT(bp)->pkt_resid);
2146 
2147 	/*
2148 	 * We used to check whether or not to try and link commands here.
2149 	 * Since we have found that there is no performance improvement
2150 	 * for linked commands, this has not made much sense.
2151 	 */
2152 	if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp)))
2153 	    != TRAN_ACCEPT) {
2154 		mutex_enter(DCD_MUTEX);
2155 		un->un_ncmds--;
2156 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2157 		    "transport returned %x\n", status);
2158 		if (status == TRAN_BUSY) {
2159 			DCD_DO_ERRSTATS(un, dcd_transerrs);
2160 			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2161 			dcd_handle_tran_busy(bp, dp, un);
2162 			if (un->un_ncmds > 0) {
2163 				bp->b_resid = sort_key;
2164 			}
2165 		} else {
2166 			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2167 			mutex_exit(DCD_MUTEX);
2168 
2169 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2170 			    "transport rejected (%d)\n",
2171 			    status);
2172 			SET_BP_ERROR(bp, EIO);
2173 			bp->b_resid = bp->b_bcount;
2174 			if (bp != un->un_sbufp) {
2175 				dcd_destroy_pkt(BP_PKT(bp));
2176 			}
2177 			biodone(bp);
2178 
2179 			mutex_enter(DCD_MUTEX);
2180 			if (un->un_state == DCD_STATE_SUSPENDED) {
2181 				cv_broadcast(&un->un_disk_busy_cv);
2182 			}
2183 			if ((un->un_ncmds < un->un_throttle) &&
2184 			    (dp->b_forw == NULL)) {
2185 					goto retry;
2186 			}
2187 		}
2188 	} else {
2189 		mutex_enter(DCD_MUTEX);
2190 
2191 		if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) {
2192 			struct buf *cmd_bp;
2193 
2194 			cmd_bp = dp->b_actf;
2195 			cmd_bp->av_back = ALLOCATING_PKT;
2196 			mutex_exit(DCD_MUTEX);
2197 			/*
2198 			 * try and map this one
2199 			 */
2200 			TRACE_0(TR_FAC_DADA, TR_DCASTART_SMALL_WINDOW_START,
2201 			    "dcdstart_small_window_start");
2202 
2203 			make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2204 
2205 			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END,
2206 			    "dcdstart_small_window_end");
2207 			/*
2208 			 * there is a small window where the active cmd
2209 			 * completes before make_dcd_cmd returns.
2210 			 * consequently, this cmd never gets started so
2211 			 * we start it from here
2212 			 */
2213 			mutex_enter(DCD_MUTEX);
2214 			if ((un->un_ncmds < un->un_throttle) &&
2215 			    (dp->b_forw == NULL)) {
2216 				goto retry;
2217 			}
2218 		}
2219 	}
2220 
2221 done:
2222 	ASSERT(mutex_owned(DCD_MUTEX));
2223 	TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end");
2224 }
2225 
2226 /*
2227  * make_dcd_cmd: create a pkt
2228  */
2229 static void
make_dcd_cmd(struct dcd_disk * un,struct buf * bp,int (* func)())2230 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)())
2231 {
2232 	auto int count, com, direction;
2233 	struct dcd_pkt *pkt;
2234 	int flags, tval;
2235 
2236 	_NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp))
2237 	TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START,
2238 	    "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un);
2239 
2240 
2241 	flags = un->un_cmd_flags;
2242 
2243 	if (bp != un->un_sbufp) {
2244 		int partition = DCDPART(bp->b_edev);
2245 		diskaddr_t p_lblksrt;
2246 		diskaddr_t lblocks;
2247 		long secnt;
2248 		uint32_t blkno;
2249 		int dkl_nblk, delta;
2250 		long resid;
2251 
2252 		if (cmlb_partinfo(un->un_dklbhandle,
2253 		    partition,
2254 		    &lblocks,
2255 		    &p_lblksrt,
2256 		    NULL,
2257 		    NULL,
2258 		    0) != 0) {
2259 			lblocks = 0;
2260 			p_lblksrt = 0;
2261 		}
2262 
2263 		dkl_nblk = (int)lblocks;
2264 
2265 		/*
2266 		 * Make sure we don't run off the end of a partition.
2267 		 *
2268 		 * Put this test here so that we can adjust b_count
2269 		 * to accurately reflect the actual amount we are
2270 		 * goint to transfer.
2271 		 */
2272 
2273 		/*
2274 		 * First, compute partition-relative block number
2275 		 */
2276 		blkno = dkblock(bp);
2277 		secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv;
2278 		count = MIN(secnt, dkl_nblk - blkno);
2279 		if (count != secnt) {
2280 			/*
2281 			 * We have an overrun
2282 			 */
2283 			resid = (secnt - count) << un->un_secdiv;
2284 			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2285 			    "overrun by %ld sectors\n",
2286 			    secnt - count);
2287 			bp->b_bcount -= resid;
2288 		} else {
2289 			resid = 0;
2290 		}
2291 
2292 		/*
2293 		 * Adjust block number to absolute
2294 		 */
2295 		delta = (int)p_lblksrt;
2296 		blkno += delta;
2297 
2298 		mutex_enter(DCD_MUTEX);
2299 		/*
2300 		 * This is for devices having block size different from
2301 		 * from DEV_BSIZE (e.g. 2K CDROMs).
2302 		 */
2303 		if (un->un_lbasize != un->un_secsize) {
2304 			blkno >>= un->un_blknoshift;
2305 			count >>= un->un_blknoshift;
2306 		}
2307 		mutex_exit(DCD_MUTEX);
2308 
2309 		TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START,
2310 		    "make_dcd_cmd_init_pkt_call (begin)");
2311 		pkt = dcd_init_pkt(ROUTE, NULL, bp,
2312 		    (uint32_t)sizeof (struct dcd_cmd),
2313 		    un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT,
2314 		    func, (caddr_t)un);
2315 		TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END,
2316 		    "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt);
2317 		if (!pkt) {
2318 			bp->b_bcount += resid;
2319 			bp->av_back = NO_PKT_ALLOCATED;
2320 			TRACE_0(TR_FAC_DADA,
2321 			    TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END,
2322 			    "make_dcd_cmd_end (NO_PKT_ALLOCATED1)");
2323 			return;
2324 		}
2325 		if (bp->b_flags & B_READ) {
2326 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
2327 			    DMA_SUPPORTTED) {
2328 				com = ATA_READ_DMA;
2329 			} else {
2330 				if (un->un_dp->options & BLOCK_MODE)
2331 					com = ATA_READ_MULTIPLE;
2332 				else
2333 					com = ATA_READ;
2334 			}
2335 			direction = DATA_READ;
2336 		} else {
2337 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
2338 			    DMA_SUPPORTTED) {
2339 				com = ATA_WRITE_DMA;
2340 			} else {
2341 				if (un->un_dp->options & BLOCK_MODE)
2342 					com = ATA_WRITE_MULTIPLE;
2343 				else
2344 					com = ATA_WRITE;
2345 			}
2346 			direction = DATA_WRITE;
2347 		}
2348 
2349 		/*
2350 		 * Save the resid in the packet, temporarily until
2351 		 * we transport the command.
2352 		 */
2353 		pkt->pkt_resid = resid;
2354 
2355 		makecommand(pkt, flags, com, blkno, ADD_LBA_MODE,
2356 		    bp->b_bcount, direction, 0);
2357 		tval = dcd_io_time;
2358 	} else {
2359 
2360 		struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw;
2361 
2362 		/*
2363 		 * set options
2364 		 */
2365 		if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) {
2366 			flags |= FLAG_SILENT;
2367 		}
2368 		if (scmd->udcd_flags &  UDCD_DIAGNOSE)
2369 			flags |= FLAG_DIAGNOSE;
2370 
2371 		if (scmd->udcd_flags & UDCD_NOINTR)
2372 			flags |= FLAG_NOINTR;
2373 
2374 		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
2375 		    (bp->b_bcount)? bp: NULL,
2376 		    (uint32_t)sizeof (struct dcd_cmd),
2377 		    2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un);
2378 
2379 		if (!pkt) {
2380 			bp->av_back = NO_PKT_ALLOCATED;
2381 			return;
2382 		}
2383 
2384 		makecommand(pkt, 0, scmd->udcd_cmd->cmd,
2385 		    scmd->udcd_cmd->sector_num.lba_num,
2386 		    scmd->udcd_cmd->address_mode,
2387 		    scmd->udcd_cmd->size,
2388 		    scmd->udcd_cmd->direction, scmd->udcd_cmd->features);
2389 
2390 		pkt->pkt_flags = flags;
2391 		if (scmd->udcd_timeout == 0)
2392 			tval = dcd_io_time;
2393 		else
2394 			tval = scmd->udcd_timeout;
2395 		/* UDAD interface should be decided. */
2396 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2397 		    "udcd interface\n");
2398 	}
2399 
2400 	pkt->pkt_comp = dcdintr;
2401 	pkt->pkt_time = tval;
2402 	PKT_SET_BP(pkt, bp);
2403 	bp->av_back = (struct buf *)pkt;
2404 
2405 	TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end");
2406 }
2407 
2408 /*
2409  * Command completion processing
2410  */
2411 static void
dcdintr(struct dcd_pkt * pkt)2412 dcdintr(struct dcd_pkt *pkt)
2413 {
2414 	struct dcd_disk *un;
2415 	struct buf *bp;
2416 	int action;
2417 	int status;
2418 
2419 	bp = PKT_GET_BP(pkt);
2420 	un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev));
2421 
2422 	TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un);
2423 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n");
2424 
2425 	mutex_enter(DCD_MUTEX);
2426 	un->un_ncmds--;
2427 	DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2428 	ASSERT(un->un_ncmds >= 0);
2429 
2430 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2431 	    "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt));
2432 
2433 	/*
2434 	 * do most common case first
2435 	 */
2436 	if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) {
2437 		int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp);
2438 
2439 		if (un->un_state == DCD_STATE_OFFLINE) {
2440 			un->un_state = un->un_last_state;
2441 			dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE,
2442 			    (const char *) diskokay);
2443 		}
2444 		/*
2445 		 * If the command is a read or a write, and we have
2446 		 * a non-zero pkt_resid, that is an error. We should
2447 		 * attempt to retry the operation if possible.
2448 		 */
2449 		action = COMMAND_DONE;
2450 		if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) {
2451 			DCD_DO_ERRSTATS(un, dcd_harderrs);
2452 			if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) {
2453 				PKT_INCR_RETRY_CNT(pkt, 1);
2454 				action = QUE_COMMAND;
2455 			} else {
2456 				/*
2457 				 * if we have exhausted retries
2458 				 * a command with a residual is in error in
2459 				 * this case.
2460 				 */
2461 				action = COMMAND_DONE_ERROR;
2462 			}
2463 			dcd_log(DCD_DEVINFO, dcd_label,
2464 			    CE_WARN, "incomplete %s- %s\n",
2465 			    (bp->b_flags & B_READ)? "read" : "write",
2466 			    (action == QUE_COMMAND)? "retrying" :
2467 			    "giving up");
2468 		}
2469 
2470 		/*
2471 		 * pkt_resid will reflect, at this point, a residual
2472 		 * of how many bytes left to be transferred there were
2473 		 * from the actual scsi command. Add this to b_resid i.e
2474 		 * the amount this driver could not see to transfer,
2475 		 * to get the total number of bytes not transfered.
2476 		 */
2477 		if (action != QUE_COMMAND) {
2478 			bp->b_resid += pkt->pkt_resid;
2479 		}
2480 
2481 	} else if (pkt->pkt_reason != CMD_CMPLT) {
2482 		action = dcd_handle_incomplete(un, bp);
2483 	}
2484 
2485 	/*
2486 	 * If we are in the middle of syncing or dumping, we have got
2487 	 * here because dcd_transport has called us explictly after
2488 	 * completing the command in a polled mode. We don't want to
2489 	 * have a recursive call into dcd_transport again.
2490 	 */
2491 	if (ddi_in_panic() && (action == QUE_COMMAND)) {
2492 		action = COMMAND_DONE_ERROR;
2493 	}
2494 
2495 	/*
2496 	 * save pkt reason; consecutive failures are not reported unless
2497 	 * fatal
2498 	 * do not reset last_pkt_reason when the cmd was retried and
2499 	 * succeeded because
2500 	 * there maybe more commands comming back with last_pkt_reason
2501 	 */
2502 	if ((un->un_last_pkt_reason != pkt->pkt_reason) &&
2503 	    ((pkt->pkt_reason != CMD_CMPLT) ||
2504 	    (PKT_GET_RETRY_CNT(pkt) == 0))) {
2505 		un->un_last_pkt_reason = pkt->pkt_reason;
2506 	}
2507 
2508 	switch (action) {
2509 	case COMMAND_DONE_ERROR:
2510 error:
2511 		if (bp->b_resid == 0) {
2512 			bp->b_resid = bp->b_bcount;
2513 		}
2514 		if (bp->b_error == 0) {
2515 			struct	dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp;
2516 			if (cdbp->cmd == ATA_FLUSH_CACHE &&
2517 			    (pkt->pkt_scbp[0] & STATUS_ATA_ERR) &&
2518 			    (pkt->pkt_scbp[1] & ERR_ABORT)) {
2519 				SET_BP_ERROR(bp, ENOTSUP);
2520 				un->un_flush_not_supported = 1;
2521 			} else {
2522 				SET_BP_ERROR(bp, EIO);
2523 			}
2524 		}
2525 		bp->b_flags |= B_ERROR;
2526 		/*FALLTHROUGH*/
2527 	case COMMAND_DONE:
2528 		dcddone_and_mutex_exit(un, bp);
2529 
2530 		TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END,
2531 		    "dcdintr_end (COMMAND_DONE)");
2532 		return;
2533 
2534 	case QUE_COMMAND:
2535 		if (un->un_ncmds >= un->un_throttle) {
2536 			struct diskhd *dp = &un->un_utab;
2537 
2538 			bp->b_actf = dp->b_actf;
2539 			dp->b_actf = bp;
2540 
2541 			DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2542 
2543 			mutex_exit(DCD_MUTEX);
2544 			goto exit;
2545 		}
2546 
2547 		un->un_ncmds++;
2548 		/* reset the pkt reason again */
2549 		pkt->pkt_reason = 0;
2550 		DCD_DO_KSTATS(un, kstat_runq_enter, bp);
2551 		mutex_exit(DCD_MUTEX);
2552 		if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) {
2553 			struct diskhd *dp = &un->un_utab;
2554 
2555 			mutex_enter(DCD_MUTEX);
2556 			un->un_ncmds--;
2557 			if (status == TRAN_BUSY) {
2558 				DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2559 				dcd_handle_tran_busy(bp, dp, un);
2560 				mutex_exit(DCD_MUTEX);
2561 				goto exit;
2562 			}
2563 			DCD_DO_ERRSTATS(un, dcd_transerrs);
2564 			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2565 
2566 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2567 			    "requeue of command fails (%x)\n", status);
2568 			SET_BP_ERROR(bp, EIO);
2569 			bp->b_resid = bp->b_bcount;
2570 
2571 			dcddone_and_mutex_exit(un, bp);
2572 			goto exit;
2573 		}
2574 		break;
2575 
2576 	case JUST_RETURN:
2577 	default:
2578 		DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2579 		mutex_exit(DCD_MUTEX);
2580 		break;
2581 	}
2582 
2583 exit:
2584 	TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end");
2585 }
2586 
2587 
2588 /*
2589  * Done with a command.
2590  */
2591 static void
dcddone_and_mutex_exit(struct dcd_disk * un,register struct buf * bp)2592 dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp)
2593 {
2594 	struct diskhd *dp;
2595 
2596 	TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un);
2597 
2598 	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex));
2599 
2600 	dp = &un->un_utab;
2601 	if (bp == dp->b_forw) {
2602 		dp->b_forw = NULL;
2603 	}
2604 
2605 	if (un->un_stats) {
2606 		ulong_t n_done = bp->b_bcount - bp->b_resid;
2607 		if (bp->b_flags & B_READ) {
2608 			IOSP->reads++;
2609 			IOSP->nread += n_done;
2610 		} else {
2611 			IOSP->writes++;
2612 			IOSP->nwritten += n_done;
2613 		}
2614 	}
2615 	if (IO_PARTITION_STATS) {
2616 		ulong_t n_done = bp->b_bcount - bp->b_resid;
2617 		if (bp->b_flags & B_READ) {
2618 			IOSP_PARTITION->reads++;
2619 			IOSP_PARTITION->nread += n_done;
2620 		} else {
2621 			IOSP_PARTITION->writes++;
2622 			IOSP_PARTITION->nwritten += n_done;
2623 		}
2624 	}
2625 
2626 	/*
2627 	 * Start the next one before releasing resources on this one
2628 	 */
2629 	if (un->un_state == DCD_STATE_SUSPENDED) {
2630 		cv_broadcast(&un->un_disk_busy_cv);
2631 	} else if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
2632 	    (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) {
2633 		dcdstart(un);
2634 	}
2635 
2636 	mutex_exit(DCD_MUTEX);
2637 
2638 	if (bp != un->un_sbufp) {
2639 		dcd_destroy_pkt(BP_PKT(bp));
2640 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2641 		    "regular done: resid %ld\n", bp->b_resid);
2642 	} else {
2643 		ASSERT(un->un_sbuf_busy);
2644 	}
2645 	TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call");
2646 
2647 	biodone(bp);
2648 
2649 	(void) pm_idle_component(DCD_DEVINFO, 0);
2650 
2651 	TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end");
2652 }
2653 
2654 
2655 /*
2656  * reset the disk unless the transport layer has already
2657  * cleared the problem
2658  */
2659 #define	C1	(STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED)
2660 static void
dcd_reset_disk(struct dcd_disk * un,struct dcd_pkt * pkt)2661 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt)
2662 {
2663 
2664 	if ((pkt->pkt_statistics & C1) == 0) {
2665 		mutex_exit(DCD_MUTEX);
2666 		if (!dcd_reset(ROUTE, RESET_ALL)) {
2667 			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2668 			    "Reset failed");
2669 		}
2670 		mutex_enter(DCD_MUTEX);
2671 	}
2672 }
2673 
2674 static int
dcd_handle_incomplete(struct dcd_disk * un,struct buf * bp)2675 dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp)
2676 {
2677 	static char *fail = "ATA transport failed: reason '%s': %s\n";
2678 	static char *notresp = "disk not responding to selection\n";
2679 	int rval = COMMAND_DONE_ERROR;
2680 	int action = COMMAND_SOFT_ERROR;
2681 	struct dcd_pkt *pkt = BP_PKT(bp);
2682 	int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) &&
2683 	    (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT));
2684 
2685 	ASSERT(mutex_owned(DCD_MUTEX));
2686 
2687 	switch (pkt->pkt_reason) {
2688 
2689 	case CMD_TIMEOUT:
2690 		/*
2691 		 * This Indicates the already the HBA would  have reset
2692 		 * so Just indicate to retry the command
2693 		 */
2694 		break;
2695 
2696 	case CMD_INCOMPLETE:
2697 		action = dcd_check_error(un, bp);
2698 		DCD_DO_ERRSTATS(un, dcd_transerrs);
2699 		if (action == COMMAND_HARD_ERROR) {
2700 			(void) dcd_reset_disk(un, pkt);
2701 		}
2702 		break;
2703 
2704 	case CMD_FATAL:
2705 		/*
2706 		 * Something drastic has gone wrong
2707 		 */
2708 		break;
2709 	case CMD_DMA_DERR:
2710 	case CMD_DATA_OVR:
2711 		/* FALLTHROUGH */
2712 
2713 	default:
2714 		/*
2715 		 * the target may still be running the	command,
2716 		 * so we should try and reset that target.
2717 		 */
2718 		DCD_DO_ERRSTATS(un, dcd_transerrs);
2719 		if ((pkt->pkt_reason != CMD_RESET) &&
2720 		    (pkt->pkt_reason != CMD_ABORTED)) {
2721 			(void) dcd_reset_disk(un, pkt);
2722 		}
2723 		break;
2724 	}
2725 
2726 	/*
2727 	 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got
2728 	 * reset/aborted because another disk on this bus caused it.
2729 	 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics
2730 	 * of STAT_TIMEOUT/STAT_DEV_RESET
2731 	 */
2732 	if ((pkt->pkt_reason == CMD_RESET) ||(pkt->pkt_reason == CMD_ABORTED)) {
2733 		/* To be written : XXX */
2734 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2735 		    "Command aborted\n");
2736 	}
2737 
2738 	if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) {
2739 		rval = COMMAND_DONE_ERROR;
2740 	} else {
2741 		if ((rval == COMMAND_DONE_ERROR) &&
2742 		    (action == COMMAND_SOFT_ERROR) &&
2743 		    ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) {
2744 			PKT_INCR_RETRY_CNT(pkt, 1);
2745 			rval = QUE_COMMAND;
2746 		}
2747 	}
2748 
2749 	if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) {
2750 		/*
2751 		 * Looks like someone turned off this shoebox.
2752 		 */
2753 		if (un->un_state != DCD_STATE_OFFLINE) {
2754 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2755 			    (const char *) notresp);
2756 			New_state(un, DCD_STATE_OFFLINE);
2757 		}
2758 	} else if (pkt->pkt_reason == CMD_FATAL) {
2759 		/*
2760 		 * Suppressing the following message for the time being
2761 		 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2762 		 * (const char *) notresp);
2763 		 */
2764 		PKT_INCR_RETRY_CNT(pkt, 6);
2765 		rval = COMMAND_DONE_ERROR;
2766 		New_state(un, DCD_STATE_FATAL);
2767 	} else if (be_chatty) {
2768 		int in_panic = ddi_in_panic();
2769 		if (!in_panic || (rval == COMMAND_DONE_ERROR)) {
2770 			if (((pkt->pkt_reason != un->un_last_pkt_reason) &&
2771 			    (pkt->pkt_reason != CMD_RESET)) ||
2772 			    (rval == COMMAND_DONE_ERROR) ||
2773 			    (dcd_error_level == DCD_ERR_ALL)) {
2774 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2775 				    fail, dcd_rname(pkt->pkt_reason),
2776 				    (rval == COMMAND_DONE_ERROR) ?
2777 				    "giving up": "retrying command");
2778 				DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2779 				    "retrycount=%x\n",
2780 				    PKT_GET_RETRY_CNT(pkt));
2781 			}
2782 		}
2783 	}
2784 error:
2785 	return (rval);
2786 }
2787 
2788 static int
dcd_check_error(struct dcd_disk * un,struct buf * bp)2789 dcd_check_error(struct dcd_disk *un, struct buf *bp)
2790 {
2791 	struct diskhd *dp = &un->un_utab;
2792 	struct dcd_pkt *pkt = BP_PKT(bp);
2793 	int rval = 0;
2794 	unsigned char status;
2795 	unsigned char error;
2796 
2797 	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start");
2798 	ASSERT(mutex_owned(DCD_MUTEX));
2799 
2800 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2801 	    "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp);
2802 
2803 	/*
2804 	 * Here we need to check status first and then if error is indicated
2805 	 * Then the error register.
2806 	 */
2807 
2808 	status = (pkt->pkt_scbp)[0];
2809 	if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) {
2810 		/*
2811 		 * There has been a Device Fault  - reason for such error
2812 		 * is vendor specific
2813 		 * Action to be taken is - Indicate error and reset device.
2814 		 */
2815 
2816 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n");
2817 		rval = COMMAND_HARD_ERROR;
2818 	} else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) {
2819 
2820 		/*
2821 		 * The sector read or written is marginal and hence ECC
2822 		 * Correction has been applied. Indicate to repair
2823 		 * Here we need to probably re-assign based on the badblock
2824 		 * mapping.
2825 		 */
2826 
2827 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2828 		    "Soft Error on block %x\n",
2829 		    ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
2830 		rval = COMMAND_SOFT_ERROR;
2831 	} else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) {
2832 		error = pkt->pkt_scbp[1];
2833 
2834 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2835 		    "Command:0x%x,Error:0x%x,Status:0x%x\n",
2836 		    GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp),
2837 		    error, status);
2838 		if ((error &  ERR_AMNF) == ERR_AMNF) {
2839 			/* Address make not found */
2840 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2841 			    "Address Mark Not Found");
2842 		} else if ((error & ERR_TKONF) == ERR_TKONF) {
2843 			/* Track 0 Not found */
2844 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2845 			    "Track 0 Not found \n");
2846 		} else if ((error & ERR_IDNF) == ERR_IDNF) {
2847 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2848 			    " ID not found \n");
2849 		} else if ((error &  ERR_UNC) == ERR_UNC) {
2850 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2851 			    "Uncorrectable data Error: Block %x\n",
2852 			    ((struct dcd_cmd *)pkt->pkt_cdbp)->
2853 			    sector_num.lba_num);
2854 		} else if ((error & ERR_BBK) == ERR_BBK) {
2855 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2856 			    "Bad block detected: Block %x\n",
2857 			    ((struct dcd_cmd *)pkt->pkt_cdbp)->
2858 			    sector_num.lba_num);
2859 		} else if ((error & ERR_ABORT) == ERR_ABORT) {
2860 			/* Aborted Command */
2861 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2862 			    " Aborted Command \n");
2863 		}
2864 		/*
2865 		 * Return the soft error so that the command
2866 		 * will be retried.
2867 		 */
2868 		rval = COMMAND_SOFT_ERROR;
2869 	}
2870 
2871 	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end");
2872 	return (rval);
2873 }
2874 
2875 
2876 /*
2877  *	System Crash Dump routine
2878  */
2879 
2880 #define	NDUMP_RETRIES	5
2881 
2882 static int
dcddump(dev_t dev,caddr_t addr,daddr_t blkno,int nblk)2883 dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
2884 {
2885 	struct dcd_pkt *pkt;
2886 	int i;
2887 	struct buf local, *bp;
2888 	int err;
2889 	unsigned char com;
2890 	diskaddr_t p_lblksrt;
2891 	diskaddr_t lblocks;
2892 
2893 	GET_SOFT_STATE(dev);
2894 #ifdef lint
2895 	part = part;
2896 #endif /* lint */
2897 
2898 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
2899 
2900 	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)
2901 		return (ENXIO);
2902 
2903 	if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev),
2904 	    &lblocks, &p_lblksrt, NULL, NULL, 0))
2905 		return (ENXIO);
2906 
2907 	if (blkno+nblk > lblocks) {
2908 		return (EINVAL);
2909 	}
2910 
2911 
2912 	if ((un->un_state == DCD_STATE_SUSPENDED) ||
2913 	    (un->un_state == DCD_STATE_PM_SUSPENDED)) {
2914 		if (pm_raise_power(DCD_DEVINFO, 0,
2915 		    DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
2916 			return (EIO);
2917 		}
2918 	}
2919 
2920 	/*
2921 	 * When cpr calls dcddump, we know that dad is in a
2922 	 * a good state, so no bus reset is required
2923 	 */
2924 	un->un_throttle = 0;
2925 
2926 	if ((un->un_state != DCD_STATE_SUSPENDED) &&
2927 	    (un->un_state != DCD_STATE_DUMPING)) {
2928 
2929 		New_state(un, DCD_STATE_DUMPING);
2930 
2931 		/*
2932 		 * Reset the bus. I'd like to not have to do this,
2933 		 * but this is the safest thing to do...
2934 		 */
2935 
2936 		if (dcd_reset(ROUTE, RESET_ALL) == 0) {
2937 			return (EIO);
2938 		}
2939 
2940 	}
2941 
2942 	blkno += p_lblksrt;
2943 
2944 	/*
2945 	 * It should be safe to call the allocator here without
2946 	 * worrying about being locked for DVMA mapping because
2947 	 * the address we're passed is already a DVMA mapping
2948 	 *
2949 	 * We are also not going to worry about semaphore ownership
2950 	 * in the dump buffer. Dumping is single threaded at present.
2951 	 */
2952 
2953 	bp = &local;
2954 	bzero((caddr_t)bp, sizeof (*bp));
2955 	bp->b_flags = B_BUSY;
2956 	bp->b_un.b_addr = addr;
2957 	bp->b_bcount = nblk << DEV_BSHIFT;
2958 	bp->b_resid = 0;
2959 
2960 	for (i = 0; i < NDUMP_RETRIES; i++) {
2961 		bp->b_flags &= ~B_ERROR;
2962 		if ((pkt = dcd_init_pkt(ROUTE, NULL, bp,
2963 		    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
2964 		    PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) {
2965 			break;
2966 		}
2967 		if (i == 0) {
2968 			if (bp->b_flags & B_ERROR) {
2969 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2970 				    "no resources for dumping; "
2971 				    "error code: 0x%x, retrying",
2972 				    geterror(bp));
2973 			} else {
2974 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2975 				    "no resources for dumping; retrying");
2976 			}
2977 		} else if (i != (NDUMP_RETRIES - 1)) {
2978 			if (bp->b_flags & B_ERROR) {
2979 				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no "
2980 				    "resources for dumping; error code: 0x%x, "
2981 				    "retrying\n", geterror(bp));
2982 			}
2983 		} else {
2984 			if (bp->b_flags & B_ERROR) {
2985 				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
2986 				    "no resources for dumping; "
2987 				    "error code: 0x%x, retries failed, "
2988 				    "giving up.\n", geterror(bp));
2989 			} else {
2990 				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
2991 				    "no resources for dumping; "
2992 				    "retries failed, giving up.\n");
2993 			}
2994 			return (EIO);
2995 		}
2996 		delay(10);
2997 	}
2998 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
2999 		com = ATA_WRITE_DMA;
3000 	} else {
3001 		if (un->un_dp->options & BLOCK_MODE)
3002 			com = ATA_WRITE_MULTIPLE;
3003 		else
3004 			com = ATA_WRITE;
3005 	}
3006 
3007 	makecommand(pkt, 0, com, blkno, ADD_LBA_MODE,
3008 	    (int)nblk*un->un_secsize, DATA_WRITE, 0);
3009 
3010 	for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) {
3011 
3012 		if (dcd_poll(pkt) == 0) {
3013 			switch (SCBP_C(pkt)) {
3014 			case STATUS_GOOD:
3015 				if (pkt->pkt_resid == 0) {
3016 					err = 0;
3017 				}
3018 				break;
3019 			case STATUS_ATA_BUSY:
3020 				(void) dcd_reset(ROUTE, RESET_TARGET);
3021 				break;
3022 			default:
3023 				mutex_enter(DCD_MUTEX);
3024 				(void) dcd_reset_disk(un, pkt);
3025 				mutex_exit(DCD_MUTEX);
3026 				break;
3027 			}
3028 		} else if (i > NDUMP_RETRIES/2) {
3029 			(void) dcd_reset(ROUTE, RESET_ALL);
3030 		}
3031 
3032 	}
3033 	dcd_destroy_pkt(pkt);
3034 	return (err);
3035 }
3036 
3037 /*
3038  * This routine implements the ioctl calls.  It is called
3039  * from the device switch at normal priority.
3040  */
3041 /* ARGSUSED3 */
3042 static int
dcdioctl(dev_t dev,int cmd,intptr_t arg,int flag,cred_t * cred_p,int * rval_p)3043 dcdioctl(dev_t dev, int cmd, intptr_t arg, int flag,
3044     cred_t *cred_p, int *rval_p)
3045 {
3046 	auto int32_t data[512 / (sizeof (int32_t))];
3047 	struct dk_cinfo *info;
3048 	struct dk_minfo media_info;
3049 	struct udcd_cmd *scmd;
3050 	int i, err;
3051 	enum uio_seg uioseg = 0;
3052 	enum dkio_state state = 0;
3053 #ifdef _MULTI_DATAMODEL
3054 	struct dadkio_rwcmd rwcmd;
3055 #endif
3056 	struct dadkio_rwcmd32 rwcmd32;
3057 	struct dcd_cmd dcdcmd;
3058 
3059 	GET_SOFT_STATE(dev);
3060 #ifdef lint
3061 	part = part;
3062 	state = state;
3063 	uioseg = uioseg;
3064 #endif  /* lint */
3065 
3066 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3067 	    "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg);
3068 
3069 	bzero((caddr_t)data, sizeof (data));
3070 
3071 	switch (cmd) {
3072 
3073 #ifdef DCDDEBUG
3074 /*
3075  * Following ioctl are for testing RESET/ABORTS
3076  */
3077 #define	DKIOCRESET	(DKIOC|14)
3078 #define	DKIOCABORT	(DKIOC|15)
3079 
3080 	case DKIOCRESET:
3081 		if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag))
3082 			return (EFAULT);
3083 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3084 		    "DKIOCRESET: data = 0x%x\n", data[0]);
3085 		if (dcd_reset(ROUTE, data[0])) {
3086 			return (0);
3087 		} else {
3088 			return (EIO);
3089 		}
3090 	case DKIOCABORT:
3091 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3092 		    "DKIOCABORT:\n");
3093 		if (dcd_abort(ROUTE, (struct dcd_pkt *)0)) {
3094 			return (0);
3095 		} else {
3096 			return (EIO);
3097 		}
3098 #endif
3099 
3100 	case DKIOCINFO:
3101 		/*
3102 		 * Controller Information
3103 		 */
3104 		info = (struct dk_cinfo *)data;
3105 
3106 		mutex_enter(DCD_MUTEX);
3107 		switch (un->un_dp->ctype) {
3108 		default:
3109 			info->dki_ctype = DKC_DIRECT;
3110 			break;
3111 		}
3112 		mutex_exit(DCD_MUTEX);
3113 		info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO));
3114 		(void) strcpy(info->dki_cname,
3115 		    ddi_get_name(ddi_get_parent(DCD_DEVINFO)));
3116 		/*
3117 		 * Unit Information
3118 		 */
3119 		info->dki_unit = ddi_get_instance(DCD_DEVINFO);
3120 		info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3);
3121 		(void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO));
3122 		info->dki_flags = DKI_FMTVOL;
3123 		info->dki_partition = DCDPART(dev);
3124 
3125 		/*
3126 		 * Max Transfer size of this device in blocks
3127 		 */
3128 		info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE;
3129 
3130 		/*
3131 		 * We can't get from here to there yet
3132 		 */
3133 		info->dki_addr = 0;
3134 		info->dki_space = 0;
3135 		info->dki_prio = 0;
3136 		info->dki_vec = 0;
3137 
3138 		i = sizeof (struct dk_cinfo);
3139 		if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag))
3140 			return (EFAULT);
3141 		else
3142 			return (0);
3143 
3144 	case DKIOCGMEDIAINFO:
3145 		/*
3146 		 * As dad target driver is used for IDE disks only
3147 		 * Can keep the return value hardcoded to FIXED_DISK
3148 		 */
3149 		media_info.dki_media_type = DK_FIXED_DISK;
3150 
3151 		mutex_enter(DCD_MUTEX);
3152 		media_info.dki_lbsize = un->un_lbasize;
3153 		media_info.dki_capacity = un->un_diskcapacity;
3154 		mutex_exit(DCD_MUTEX);
3155 
3156 		if (ddi_copyout(&media_info, (caddr_t)arg,
3157 		    sizeof (struct dk_minfo), flag))
3158 			return (EFAULT);
3159 		else
3160 			return (0);
3161 
3162 	case DKIOCGGEOM:
3163 	case DKIOCGVTOC:
3164 	case DKIOCGETEFI:
3165 
3166 		mutex_enter(DCD_MUTEX);
3167 		if (un->un_ncmds == 0) {
3168 			if ((err = dcd_unit_ready(dev)) != 0) {
3169 				mutex_exit(DCD_MUTEX);
3170 				return (err);
3171 			}
3172 		}
3173 
3174 		mutex_exit(DCD_MUTEX);
3175 		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3176 		    arg, flag, cred_p, rval_p, 0);
3177 		return (err);
3178 
3179 	case DKIOCGAPART:
3180 	case DKIOCSAPART:
3181 	case DKIOCSGEOM:
3182 	case DKIOCSVTOC:
3183 	case DKIOCSETEFI:
3184 	case DKIOCPARTITION:
3185 	case DKIOCPARTINFO:
3186 	case DKIOCGMBOOT:
3187 	case DKIOCSMBOOT:
3188 
3189 		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3190 		    arg, flag, cred_p, rval_p, 0);
3191 		return (err);
3192 
3193 	case DIOCTL_RWCMD:
3194 		if (drv_priv(cred_p) != 0) {
3195 			return (EPERM);
3196 		}
3197 
3198 #ifdef _MULTI_DATAMODEL
3199 		switch (ddi_model_convert_from(flag & FMODELS)) {
3200 		case DDI_MODEL_NONE:
3201 			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd,
3202 			    sizeof (struct dadkio_rwcmd), flag)) {
3203 				return (EFAULT);
3204 			}
3205 			rwcmd32.cmd = rwcmd.cmd;
3206 			rwcmd32.flags = rwcmd.flags;
3207 			rwcmd32.blkaddr = rwcmd.blkaddr;
3208 			rwcmd32.buflen = rwcmd.buflen;
3209 			rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr;
3210 			break;
3211 		case DDI_MODEL_ILP32:
3212 			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3213 			    sizeof (struct dadkio_rwcmd32), flag)) {
3214 				return (EFAULT);
3215 			}
3216 			break;
3217 		}
3218 #else
3219 		if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3220 		    sizeof (struct dadkio_rwcmd32), flag)) {
3221 			return (EFAULT);
3222 		}
3223 #endif
3224 		mutex_enter(DCD_MUTEX);
3225 
3226 		uioseg  = UIO_SYSSPACE;
3227 		scmd = (struct udcd_cmd *)data;
3228 		scmd->udcd_cmd = &dcdcmd;
3229 		/*
3230 		 * Convert the dadkio_rwcmd structure to udcd_cmd so that
3231 		 * it can take the normal path to get the io done
3232 		 */
3233 		if (rwcmd32.cmd == DADKIO_RWCMD_READ) {
3234 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
3235 			    DMA_SUPPORTTED)
3236 				scmd->udcd_cmd->cmd = ATA_READ_DMA;
3237 			else
3238 				scmd->udcd_cmd->cmd = ATA_READ;
3239 			scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3240 			scmd->udcd_cmd->direction = DATA_READ;
3241 			scmd->udcd_flags |= UDCD_READ|UDCD_SILENT;
3242 		} else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) {
3243 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
3244 			    DMA_SUPPORTTED)
3245 				scmd->udcd_cmd->cmd = ATA_WRITE_DMA;
3246 			else
3247 				scmd->udcd_cmd->cmd = ATA_WRITE;
3248 			scmd->udcd_cmd->direction = DATA_WRITE;
3249 			scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT;
3250 		} else {
3251 			mutex_exit(DCD_MUTEX);
3252 			return (EINVAL);
3253 		}
3254 
3255 		scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3256 		scmd->udcd_cmd->features = 0;
3257 		scmd->udcd_cmd->size = rwcmd32.buflen;
3258 		scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr;
3259 		scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr;
3260 		scmd->udcd_buflen = rwcmd32.buflen;
3261 		scmd->udcd_timeout = (ushort_t)dcd_io_time;
3262 		scmd->udcd_resid = 0ULL;
3263 		scmd->udcd_status = 0;
3264 		scmd->udcd_error_reg = 0;
3265 		scmd->udcd_status_reg = 0;
3266 
3267 		mutex_exit(DCD_MUTEX);
3268 
3269 		i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE);
3270 		mutex_enter(DCD_MUTEX);
3271 		/*
3272 		 * After return convert the status from scmd to
3273 		 * dadkio_status
3274 		 */
3275 		(void) dcd_translate(&(rwcmd32.status), scmd);
3276 		rwcmd32.status.resid = scmd->udcd_resid;
3277 		mutex_exit(DCD_MUTEX);
3278 
3279 #ifdef _MULTI_DATAMODEL
3280 		switch (ddi_model_convert_from(flag & FMODELS)) {
3281 		case DDI_MODEL_NONE: {
3282 			int counter;
3283 			rwcmd.status.status = rwcmd32.status.status;
3284 			rwcmd.status.resid  = rwcmd32.status.resid;
3285 			rwcmd.status.failed_blk_is_valid =
3286 			    rwcmd32.status.failed_blk_is_valid;
3287 			rwcmd.status.failed_blk = rwcmd32.status.failed_blk;
3288 			rwcmd.status.fru_code_is_valid =
3289 			    rwcmd32.status.fru_code_is_valid;
3290 			rwcmd.status.fru_code = rwcmd32.status.fru_code;
3291 			for (counter = 0;
3292 			    counter < DADKIO_ERROR_INFO_LEN; counter++)
3293 				rwcmd.status.add_error_info[counter] =
3294 				    rwcmd32.status.add_error_info[counter];
3295 			}
3296 			/* Copy out the result back to the user program */
3297 			if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg,
3298 			    sizeof (struct dadkio_rwcmd), flag)) {
3299 				if (i != 0) {
3300 					i = EFAULT;
3301 				}
3302 			}
3303 			break;
3304 		case DDI_MODEL_ILP32:
3305 			/* Copy out the result back to the user program */
3306 			if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3307 			    sizeof (struct dadkio_rwcmd32), flag)) {
3308 				if (i != 0) {
3309 					i = EFAULT;
3310 				}
3311 			}
3312 			break;
3313 		}
3314 #else
3315 		/* Copy out the result back to the user program  */
3316 		if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3317 		    sizeof (struct dadkio_rwcmd32), flag)) {
3318 			if (i != 0)
3319 				i = EFAULT;
3320 		}
3321 #endif
3322 		return (i);
3323 
3324 	case UDCDCMD:	{
3325 #ifdef	_MULTI_DATAMODEL
3326 		/*
3327 		 * For use when a 32 bit app makes a call into a
3328 		 * 64 bit ioctl
3329 		 */
3330 		struct udcd_cmd32	udcd_cmd_32_for_64;
3331 		struct udcd_cmd32	*ucmd32 = &udcd_cmd_32_for_64;
3332 		model_t			model;
3333 #endif /* _MULTI_DATAMODEL */
3334 
3335 		if (drv_priv(cred_p) != 0) {
3336 			return (EPERM);
3337 		}
3338 
3339 		scmd = (struct udcd_cmd *)data;
3340 
3341 #ifdef _MULTI_DATAMODEL
3342 		switch (model = ddi_model_convert_from(flag & FMODELS)) {
3343 		case DDI_MODEL_ILP32:
3344 			if (ddi_copyin((caddr_t)arg, ucmd32,
3345 			    sizeof (struct udcd_cmd32), flag)) {
3346 				return (EFAULT);
3347 			}
3348 			/*
3349 			 * Convert the ILP32 uscsi data from the
3350 			 * application to LP64 for internal use.
3351 			 */
3352 			udcd_cmd32toudcd_cmd(ucmd32, scmd);
3353 			break;
3354 		case DDI_MODEL_NONE:
3355 			if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd),
3356 			    flag)) {
3357 				return (EFAULT);
3358 			}
3359 			break;
3360 		}
3361 #else /* ! _MULTI_DATAMODEL */
3362 		if (ddi_copyin((caddr_t)arg, (caddr_t)scmd,
3363 		    sizeof (*scmd), flag)) {
3364 			return (EFAULT);
3365 		}
3366 #endif /* ! _MULTI_DATAMODEL */
3367 
3368 		scmd->udcd_flags &= ~UDCD_NOINTR;
3369 		uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE;
3370 
3371 		i = dcdioctl_cmd(dev, scmd, uioseg, uioseg);
3372 #ifdef _MULTI_DATAMODEL
3373 		switch (model) {
3374 		case DDI_MODEL_ILP32:
3375 			/*
3376 			 * Convert back to ILP32 before copyout to the
3377 			 * application
3378 			 */
3379 			udcd_cmdtoudcd_cmd32(scmd, ucmd32);
3380 			if (ddi_copyout(ucmd32, (caddr_t)arg,
3381 			    sizeof (*ucmd32), flag)) {
3382 				if (i != 0)
3383 					i = EFAULT;
3384 			}
3385 			break;
3386 		case DDI_MODEL_NONE:
3387 			if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd),
3388 			    flag)) {
3389 				if (i != 0)
3390 					i = EFAULT;
3391 			}
3392 			break;
3393 		}
3394 #else /* ! _MULTI_DATAMODE */
3395 		if (ddi_copyout((caddr_t)scmd, (caddr_t)arg,
3396 		    sizeof (*scmd), flag)) {
3397 			if (i != 0)
3398 				i = EFAULT;
3399 		}
3400 #endif
3401 		return (i);
3402 	}
3403 	case DKIOCFLUSHWRITECACHE:	{
3404 		struct dk_callback *dkc = (struct dk_callback *)arg;
3405 		struct dcd_pkt *pkt;
3406 		struct buf *bp;
3407 		int is_sync = 1;
3408 
3409 		mutex_enter(DCD_MUTEX);
3410 		if (un->un_flush_not_supported ||
3411 		    ! un->un_write_cache_enabled) {
3412 			i = un->un_flush_not_supported ? ENOTSUP : 0;
3413 			mutex_exit(DCD_MUTEX);
3414 			/*
3415 			 * If a callback was requested: a callback will
3416 			 * always be done if the caller saw the
3417 			 * DKIOCFLUSHWRITECACHE ioctl return 0, and
3418 			 * never done if the caller saw the ioctl return
3419 			 * an error.
3420 			 */
3421 			if ((flag & FKIOCTL) && dkc != NULL &&
3422 			    dkc->dkc_callback != NULL) {
3423 				(*dkc->dkc_callback)(dkc->dkc_cookie, i);
3424 				/*
3425 				 * Did callback and reported error.
3426 				 * Since we did a callback, ioctl
3427 				 * should return 0.
3428 				 */
3429 				i = 0;
3430 			}
3431 			return (i);
3432 		}
3433 
3434 		/*
3435 		 * Get the special buffer
3436 		 */
3437 		while (un->un_sbuf_busy) {
3438 			cv_wait(&un->un_sbuf_cv, DCD_MUTEX);
3439 		}
3440 		un->un_sbuf_busy = 1;
3441 		bp  = un->un_sbufp;
3442 		mutex_exit(DCD_MUTEX);
3443 
3444 		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
3445 		    NULL, (uint32_t)sizeof (struct dcd_cmd),
3446 		    2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un);
3447 		ASSERT(pkt != NULL);
3448 
3449 		makecommand(pkt, un->un_cmd_flags | FLAG_SILENT,
3450 		    ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0);
3451 
3452 		pkt->pkt_comp = dcdintr;
3453 		pkt->pkt_time = DCD_FLUSH_TIME;
3454 		PKT_SET_BP(pkt, bp);
3455 
3456 		bp->av_back = (struct buf *)pkt;
3457 		bp->b_forw = NULL;
3458 		bp->b_flags = B_BUSY;
3459 		bp->b_error = 0;
3460 		bp->b_edev = dev;
3461 		bp->b_dev = cmpdev(dev);
3462 		bp->b_bcount = 0;
3463 		bp->b_blkno = 0;
3464 		bp->b_un.b_addr = 0;
3465 		bp->b_iodone = NULL;
3466 		bp->b_list = NULL;
3467 		bp->b_private = NULL;
3468 
3469 		if ((flag & FKIOCTL) && dkc != NULL &&
3470 		    dkc->dkc_callback != NULL) {
3471 			struct dk_callback *dkc2 = (struct dk_callback *)
3472 			    kmem_zalloc(sizeof (*dkc2), KM_SLEEP);
3473 			bcopy(dkc, dkc2, sizeof (*dkc2));
3474 
3475 			bp->b_private = dkc2;
3476 			bp->b_iodone = dcdflushdone;
3477 			is_sync = 0;
3478 		}
3479 
3480 		(void) dcdstrategy(bp);
3481 
3482 		i = 0;
3483 		if (is_sync) {
3484 			i = biowait(bp);
3485 			(void) dcdflushdone(bp);
3486 		}
3487 
3488 		return (i);
3489 	}
3490 	default:
3491 		break;
3492 	}
3493 	return (ENOTTY);
3494 }
3495 
3496 
3497 static int
dcdflushdone(struct buf * bp)3498 dcdflushdone(struct buf *bp)
3499 {
3500 	struct dcd_disk *un = ddi_get_soft_state(dcd_state,
3501 	    DCDUNIT(bp->b_edev));
3502 	struct dcd_pkt *pkt = BP_PKT(bp);
3503 	struct dk_callback *dkc = bp->b_private;
3504 
3505 	ASSERT(un != NULL);
3506 	ASSERT(bp == un->un_sbufp);
3507 	ASSERT(pkt != NULL);
3508 
3509 	dcd_destroy_pkt(pkt);
3510 	bp->av_back = NO_PKT_ALLOCATED;
3511 
3512 	if (dkc != NULL) {
3513 		ASSERT(bp->b_iodone != NULL);
3514 		(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
3515 		kmem_free(dkc, sizeof (*dkc));
3516 		bp->b_iodone = NULL;
3517 		bp->b_private = NULL;
3518 	}
3519 
3520 	/*
3521 	 * Tell anybody who cares that the buffer is now free
3522 	 */
3523 	mutex_enter(DCD_MUTEX);
3524 	un->un_sbuf_busy = 0;
3525 	cv_signal(&un->un_sbuf_cv);
3526 	mutex_exit(DCD_MUTEX);
3527 	return (0);
3528 }
3529 
3530 /*
3531  * dcdrunout:
3532  *	the callback function for resource allocation
3533  *
3534  * XXX it would be preferable that dcdrunout() scans the whole
3535  *	list for possible candidates for dcdstart(); this avoids
3536  *	that a bp at the head of the list whose request cannot be
3537  *	satisfied is retried again and again
3538  */
3539 /*ARGSUSED*/
3540 static int
dcdrunout(caddr_t arg)3541 dcdrunout(caddr_t arg)
3542 {
3543 	int serviced;
3544 	struct dcd_disk *un;
3545 	struct diskhd *dp;
3546 
3547 	TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p",
3548 	    arg);
3549 	serviced = 1;
3550 
3551 	un = (struct dcd_disk *)arg;
3552 	dp = &un->un_utab;
3553 
3554 	/*
3555 	 * We now support passing a structure to the callback
3556 	 * routine.
3557 	 */
3558 	ASSERT(un != NULL);
3559 	mutex_enter(DCD_MUTEX);
3560 	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
3561 		dcdstart(un);
3562 	}
3563 	if (un->un_state == DCD_STATE_RWAIT) {
3564 		serviced = 0;
3565 	}
3566 	mutex_exit(DCD_MUTEX);
3567 	TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END,
3568 	    "dcdrunout_end: serviced %d", serviced);
3569 	return (serviced);
3570 }
3571 
3572 
3573 /*
3574  * This routine called to see whether unit is (still) there. Must not
3575  * be called when un->un_sbufp is in use, and must not be called with
3576  * an unattached disk. Soft state of disk is restored to what it was
3577  * upon entry- up to caller to set the correct state.
3578  *
3579  * We enter with the disk mutex held.
3580  */
3581 
3582 /* ARGSUSED0 */
3583 static int
dcd_unit_ready(dev_t dev)3584 dcd_unit_ready(dev_t dev)
3585 {
3586 #ifndef lint
3587 	auto struct udcd_cmd dcmd, *com = &dcmd;
3588 	auto struct dcd_cmd cmdblk;
3589 #endif
3590 	int error;
3591 #ifndef lint
3592 	GET_SOFT_STATE(dev);
3593 #endif
3594 
3595 	/*
3596 	 * Now that we protect the special buffer with
3597 	 * a mutex, we could probably do a mutex_tryenter
3598 	 * on it here and return failure if it were held...
3599 	 */
3600 
3601 	error = 0;
3602 	return (error);
3603 }
3604 
3605 /* ARGSUSED0 */
3606 int
dcdioctl_cmd(dev_t devp,struct udcd_cmd * in,enum uio_seg cdbspace,enum uio_seg dataspace)3607 dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace,
3608     enum uio_seg dataspace)
3609 {
3610 
3611 	struct buf *bp;
3612 	struct	udcd_cmd *scmd;
3613 	struct dcd_pkt *pkt;
3614 	int	err, rw;
3615 	caddr_t	cdb;
3616 	int	flags = 0;
3617 
3618 	GET_SOFT_STATE(devp);
3619 
3620 #ifdef lint
3621 	part = part;
3622 #endif
3623 
3624 	/*
3625 	 * Is this a request to reset the bus?
3626 	 * if so, we need to do reseting.
3627 	 */
3628 
3629 	if (in->udcd_flags & UDCD_RESET) {
3630 		int flag = RESET_TARGET;
3631 		err = dcd_reset(ROUTE, flag) ? 0: EIO;
3632 		return (err);
3633 	}
3634 
3635 	scmd = in;
3636 
3637 
3638 	/* Do some sanity checks */
3639 	if (scmd->udcd_buflen <= 0) {
3640 		if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) {
3641 			return (EINVAL);
3642 		} else {
3643 			scmd->udcd_buflen = 0;
3644 		}
3645 	}
3646 
3647 	/* Make a copy of the dcd_cmd passed  */
3648 	cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP);
3649 	if (cdbspace == UIO_SYSSPACE) {
3650 		flags |= FKIOCTL;
3651 	}
3652 
3653 	if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd),
3654 	    flags)) {
3655 		kmem_free(cdb, sizeof (struct dcd_cmd));
3656 		return (EFAULT);
3657 	}
3658 	scmd = (struct udcd_cmd *)kmem_alloc(sizeof (*scmd), KM_SLEEP);
3659 	bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd));
3660 	scmd->udcd_cmd = (struct dcd_cmd *)cdb;
3661 	rw = (scmd->udcd_flags & UDCD_READ) ? B_READ: B_WRITE;
3662 
3663 
3664 	/*
3665 	 * Get the special buffer
3666 	 */
3667 
3668 	mutex_enter(DCD_MUTEX);
3669 	while (un->un_sbuf_busy) {
3670 		if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) {
3671 			kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3672 			kmem_free((caddr_t)scmd, sizeof (*scmd));
3673 			mutex_exit(DCD_MUTEX);
3674 			return (EINTR);
3675 		}
3676 	}
3677 
3678 	un->un_sbuf_busy = 1;
3679 	bp  = un->un_sbufp;
3680 	mutex_exit(DCD_MUTEX);
3681 
3682 
3683 	/*
3684 	 * If we are going to do actual I/O, let physio do all the
3685 	 * things
3686 	 */
3687 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3688 	    "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen);
3689 
3690 	if (scmd->udcd_buflen) {
3691 		auto struct iovec aiov;
3692 		auto struct uio auio;
3693 		struct uio *uio = &auio;
3694 
3695 		bzero((caddr_t)&auio, sizeof (struct uio));
3696 		bzero((caddr_t)&aiov, sizeof (struct iovec));
3697 
3698 		aiov.iov_base = scmd->udcd_bufaddr;
3699 		aiov.iov_len = scmd->udcd_buflen;
3700 
3701 		uio->uio_iov = &aiov;
3702 		uio->uio_iovcnt = 1;
3703 		uio->uio_resid = scmd->udcd_buflen;
3704 		uio->uio_segflg = dataspace;
3705 
3706 		/*
3707 		 * Let physio do the rest...
3708 		 */
3709 		bp->av_back = NO_PKT_ALLOCATED;
3710 		bp->b_forw = (struct buf *)scmd;
3711 		err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio);
3712 	} else {
3713 		/*
3714 		 * We have to mimic what physio would do here.
3715 		 */
3716 		bp->av_back = NO_PKT_ALLOCATED;
3717 		bp->b_forw = (struct buf *)scmd;
3718 		bp->b_flags = B_BUSY | rw;
3719 		bp->b_edev = devp;
3720 		bp->b_dev = cmpdev(devp);
3721 		bp->b_bcount = bp->b_blkno = 0;
3722 		(void) dcdstrategy(bp);
3723 		err = biowait(bp);
3724 	}
3725 
3726 done:
3727 	if ((pkt = BP_PKT(bp)) != NULL) {
3728 		bp->av_back = NO_PKT_ALLOCATED;
3729 		/* we need to update the completion status of udcd command */
3730 		in->udcd_resid = bp->b_resid;
3731 		in->udcd_status_reg = SCBP_C(pkt);
3732 		/* XXX: we need to give error_reg also */
3733 		dcd_destroy_pkt(pkt);
3734 	}
3735 	/*
3736 	 * Tell anybody who cares that the buffer is now free
3737 	 */
3738 	mutex_enter(DCD_MUTEX);
3739 	un->un_sbuf_busy = 0;
3740 	cv_signal(&un->un_sbuf_cv);
3741 	mutex_exit(DCD_MUTEX);
3742 
3743 	kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3744 	kmem_free((caddr_t)scmd, sizeof (*scmd));
3745 	return (err);
3746 }
3747 
3748 static void
dcdudcdmin(struct buf * bp)3749 dcdudcdmin(struct buf *bp)
3750 {
3751 
3752 #ifdef lint
3753 	bp = bp;
3754 #endif
3755 
3756 }
3757 
3758 /*
3759  * restart a cmd from timeout() context
3760  *
3761  * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero
3762  * a restart timeout request has been issued and no new timeouts should
3763  * be requested. b_forw is reset when the cmd eventually completes in
3764  * dcddone_and_mutex_exit()
3765  */
3766 void
dcdrestart(void * arg)3767 dcdrestart(void *arg)
3768 {
3769 	struct dcd_disk *un = (struct dcd_disk *)arg;
3770 	struct buf *bp;
3771 	int status;
3772 
3773 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n");
3774 
3775 	mutex_enter(DCD_MUTEX);
3776 	bp = un->un_utab.b_forw;
3777 	if (bp) {
3778 		un->un_ncmds++;
3779 		DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
3780 	}
3781 
3782 
3783 	if (bp) {
3784 		struct dcd_pkt *pkt = BP_PKT(bp);
3785 
3786 		mutex_exit(DCD_MUTEX);
3787 
3788 		pkt->pkt_flags = 0;
3789 
3790 		if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) {
3791 			mutex_enter(DCD_MUTEX);
3792 			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
3793 			un->un_ncmds--;
3794 			if (status == TRAN_BUSY) {
3795 				/* XXX : To be checked */
3796 				/*
3797 				 * if (un->un_throttle > 1) {
3798 				 *	ASSERT(un->un_ncmds >= 0);
3799 				 *	un->un_throttle = un->un_ncmds;
3800 				 * }
3801 				 */
3802 				un->un_reissued_timeid =
3803 				    timeout(dcdrestart, (caddr_t)un,
3804 				    DCD_BSY_TIMEOUT/500);
3805 				mutex_exit(DCD_MUTEX);
3806 				return;
3807 			}
3808 			DCD_DO_ERRSTATS(un, dcd_transerrs);
3809 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
3810 			    "dcdrestart transport failed (%x)\n", status);
3811 			bp->b_resid = bp->b_bcount;
3812 			SET_BP_ERROR(bp, EIO);
3813 
3814 			DCD_DO_KSTATS(un, kstat_waitq_exit, bp);
3815 			un->un_reissued_timeid = 0L;
3816 			dcddone_and_mutex_exit(un, bp);
3817 			return;
3818 		}
3819 		mutex_enter(DCD_MUTEX);
3820 	}
3821 	un->un_reissued_timeid = 0L;
3822 	mutex_exit(DCD_MUTEX);
3823 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n");
3824 }
3825 
3826 /*
3827  * This routine gets called to reset the throttle to its saved
3828  * value wheneven we lower the throttle.
3829  */
3830 void
dcd_reset_throttle(caddr_t arg)3831 dcd_reset_throttle(caddr_t arg)
3832 {
3833 	struct dcd_disk *un = (struct dcd_disk *)arg;
3834 	struct diskhd *dp;
3835 
3836 	mutex_enter(DCD_MUTEX);
3837 	dp = &un->un_utab;
3838 
3839 	/*
3840 	 * start any commands that didn't start while throttling.
3841 	 */
3842 	if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
3843 	    (dp->b_forw == NULL)) {
3844 		dcdstart(un);
3845 	}
3846 	mutex_exit(DCD_MUTEX);
3847 }
3848 
3849 
3850 /*
3851  * This routine handles the case when a TRAN_BUSY is
3852  * returned by HBA.
3853  *
3854  * If there are some commands already in the transport, the
3855  * bp can be put back on queue and it will
3856  * be retried when the queue is emptied after command
3857  * completes. But if there is no command in the tranport
3858  * and it still return busy, we have to retry the command
3859  * after some time like 10ms.
3860  */
3861 /* ARGSUSED0 */
3862 static void
dcd_handle_tran_busy(struct buf * bp,struct diskhd * dp,struct dcd_disk * un)3863 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un)
3864 {
3865 	ASSERT(mutex_owned(DCD_MUTEX));
3866 
3867 
3868 	if (dp->b_forw == NULL || dp->b_forw == bp) {
3869 		dp->b_forw = bp;
3870 	} else if (dp->b_forw != bp) {
3871 		bp->b_actf = dp->b_actf;
3872 		dp->b_actf = bp;
3873 
3874 	}
3875 	if (!un->un_reissued_timeid) {
3876 		un->un_reissued_timeid =
3877 		    timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500);
3878 	}
3879 }
3880 
3881 static int
dcd_write_deviceid(struct dcd_disk * un)3882 dcd_write_deviceid(struct dcd_disk *un)
3883 {
3884 
3885 	int	status;
3886 	diskaddr_t blk;
3887 	struct udcd_cmd ucmd;
3888 	struct dcd_cmd cdb;
3889 	struct dk_devid	*dkdevid;
3890 	uint_t *ip, chksum;
3891 	int	i;
3892 	dev_t	dev;
3893 
3894 	mutex_exit(DCD_MUTEX);
3895 	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3896 		mutex_enter(DCD_MUTEX);
3897 		return (EINVAL);
3898 	}
3899 	mutex_enter(DCD_MUTEX);
3900 
3901 	/* Allocate the buffer */
3902 	dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP);
3903 
3904 	/* Fill in the revision */
3905 	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
3906 	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
3907 
3908 	/* Copy in the device id */
3909 	bcopy(un->un_devid, &dkdevid->dkd_devid,
3910 	    ddi_devid_sizeof(un->un_devid));
3911 
3912 	/* Calculate the chksum */
3913 	chksum = 0;
3914 	ip = (uint_t *)dkdevid;
3915 	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
3916 		chksum ^= ip[i];
3917 
3918 	/* Fill in the checksum */
3919 	DKD_FORMCHKSUM(chksum, dkdevid);
3920 
3921 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3922 	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
3923 
3924 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3925 		cdb.cmd = ATA_WRITE_DMA;
3926 	} else {
3927 		if (un->un_dp->options & BLOCK_MODE)
3928 			cdb.cmd = ATA_WRITE_MULTIPLE;
3929 		else
3930 			cdb.cmd = ATA_WRITE;
3931 	}
3932 	cdb.size = un->un_secsize;
3933 	cdb.sector_num.lba_num = blk;
3934 	cdb.address_mode = ADD_LBA_MODE;
3935 	cdb.direction = DATA_WRITE;
3936 
3937 	ucmd.udcd_flags = UDCD_WRITE;
3938 	ucmd.udcd_cmd =  &cdb;
3939 	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3940 	ucmd.udcd_buflen = un->un_secsize;
3941 	ucmd.udcd_flags |= UDCD_SILENT;
3942 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3943 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3944 	mutex_exit(DCD_MUTEX);
3945 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3946 	mutex_enter(DCD_MUTEX);
3947 
3948 	kmem_free(dkdevid, un->un_secsize);
3949 	return (status);
3950 }
3951 
3952 static int
dcd_read_deviceid(struct dcd_disk * un)3953 dcd_read_deviceid(struct dcd_disk *un)
3954 {
3955 	int status;
3956 	diskaddr_t blk;
3957 	struct udcd_cmd ucmd;
3958 	struct dcd_cmd cdb;
3959 	struct dk_devid *dkdevid;
3960 	uint_t *ip;
3961 	int chksum;
3962 	int i, sz;
3963 	dev_t dev;
3964 
3965 	mutex_exit(DCD_MUTEX);
3966 	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3967 		mutex_enter(DCD_MUTEX);
3968 		return (EINVAL);
3969 	}
3970 	mutex_enter(DCD_MUTEX);
3971 
3972 	dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP);
3973 
3974 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3975 	(void) bzero((caddr_t)&cdb, sizeof (cdb));
3976 
3977 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3978 		cdb.cmd = ATA_READ_DMA;
3979 	} else {
3980 		if (un->un_dp->options & BLOCK_MODE)
3981 			cdb.cmd = ATA_READ_MULTIPLE;
3982 		else
3983 			cdb.cmd = ATA_READ;
3984 	}
3985 	cdb.size = un->un_secsize;
3986 	cdb.sector_num.lba_num = blk;
3987 	cdb.address_mode = ADD_LBA_MODE;
3988 	cdb.direction = DATA_READ;
3989 
3990 	ucmd.udcd_flags = UDCD_READ;
3991 	ucmd.udcd_cmd =  &cdb;
3992 	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3993 	ucmd.udcd_buflen = un->un_secsize;
3994 	ucmd.udcd_flags |= UDCD_SILENT;
3995 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3996 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3997 	mutex_exit(DCD_MUTEX);
3998 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3999 	mutex_enter(DCD_MUTEX);
4000 
4001 	if (status != 0) {
4002 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4003 		return (status);
4004 	}
4005 
4006 	/* Validate the revision */
4007 
4008 	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
4009 	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
4010 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4011 		return (EINVAL);
4012 	}
4013 
4014 	/* Calculate the checksum */
4015 	chksum = 0;
4016 	ip = (uint_t *)dkdevid;
4017 	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
4018 		chksum ^= ip[i];
4019 
4020 	/* Compare the checksums */
4021 
4022 	if (DKD_GETCHKSUM(dkdevid) != chksum) {
4023 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4024 		return (EINVAL);
4025 	}
4026 
4027 	/* VAlidate the device id */
4028 	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
4029 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4030 		return (EINVAL);
4031 	}
4032 
4033 	/* return a copy of the device id */
4034 	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
4035 	un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP);
4036 	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
4037 	kmem_free((caddr_t)dkdevid, un->un_secsize);
4038 
4039 	return (0);
4040 }
4041 
4042 /*
4043  * Return the device id for the device.
4044  * 1. If the device ID exists then just return it - nothing to do in that case.
4045  * 2. Build one from the drives model number and serial number.
4046  * 3. If there is a problem in building it from serial/model #, then try
4047  * to read it from the acyl region of the disk.
4048  * Note: If this function is unable to return a valid ID then the calling
4049  * point will invoke the routine to create a fabricated ID ans stor it on the
4050  * acyl region of the disk.
4051  */
4052 static ddi_devid_t
dcd_get_devid(struct dcd_disk * un)4053 dcd_get_devid(struct dcd_disk *un)
4054 {
4055 	int		rc;
4056 
4057 	/* If already registered, return that value */
4058 	if (un->un_devid != NULL)
4059 		return (un->un_devid);
4060 
4061 	/* Build a devid from model and serial number, if present */
4062 	rc = dcd_make_devid_from_serial(un);
4063 
4064 	if (rc != DDI_SUCCESS) {
4065 		/* Read the devid from the disk. */
4066 		if (dcd_read_deviceid(un))
4067 			return (NULL);
4068 	}
4069 
4070 	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4071 	return (un->un_devid);
4072 }
4073 
4074 
4075 static ddi_devid_t
dcd_create_devid(struct dcd_disk * un)4076 dcd_create_devid(struct dcd_disk *un)
4077 {
4078 	if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *)
4079 	    &un->un_devid) == DDI_FAILURE)
4080 		return (NULL);
4081 
4082 	if (dcd_write_deviceid(un)) {
4083 		ddi_devid_free(un->un_devid);
4084 		un->un_devid = NULL;
4085 		return (NULL);
4086 	}
4087 
4088 	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4089 	return (un->un_devid);
4090 }
4091 
4092 /*
4093  * Build a devid from the model and serial number, if present
4094  * Return DDI_SUCCESS or DDI_FAILURE.
4095  */
4096 static int
dcd_make_devid_from_serial(struct dcd_disk * un)4097 dcd_make_devid_from_serial(struct dcd_disk *un)
4098 {
4099 	int	rc = DDI_SUCCESS;
4100 	char	*hwid;
4101 	char	*model;
4102 	int	model_len;
4103 	char	*serno;
4104 	int	serno_len;
4105 	int	total_len;
4106 
4107 	/* initialize the model and serial number information */
4108 	model = un->un_dcd->dcd_ident->dcd_model;
4109 	model_len = DCD_MODEL_NUMBER_LENGTH;
4110 	serno = un->un_dcd->dcd_ident->dcd_drvser;
4111 	serno_len = DCD_SERIAL_NUMBER_LENGTH;
4112 
4113 	/* Verify the model and serial number */
4114 	dcd_validate_model_serial(model, &model_len, model_len);
4115 	if (model_len == 0) {
4116 		rc = DDI_FAILURE;
4117 		goto out;
4118 	}
4119 	dcd_validate_model_serial(serno, &serno_len, serno_len);
4120 	if (serno_len == 0) {
4121 		rc = DDI_FAILURE;
4122 		goto out;
4123 	}
4124 
4125 	/*
4126 	 * The device ID will be concatenation of the model number,
4127 	 * the '=' separator, the serial number. Allocate
4128 	 * the string and concatenate the components.
4129 	 */
4130 	total_len = model_len + 1 + serno_len;
4131 	hwid = kmem_alloc(total_len, KM_SLEEP);
4132 	bcopy((caddr_t)model, (caddr_t)hwid, model_len);
4133 	bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1);
4134 	bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len);
4135 
4136 	/* Initialize the device ID, trailing NULL not included */
4137 	rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len,
4138 	    hwid, (ddi_devid_t *)&un->un_devid);
4139 
4140 	/* Free the allocated string */
4141 	kmem_free(hwid, total_len);
4142 
4143 out:	return (rc);
4144 }
4145 
4146 /*
4147  * Test for a valid model or serial number. Assume that a valid representation
4148  * contains at least one character that is neither a space, 0 digit, or NULL.
4149  * Trim trailing blanks and NULLS from returned length.
4150  */
4151 static void
dcd_validate_model_serial(char * str,int * retlen,int totallen)4152 dcd_validate_model_serial(char *str, int *retlen, int totallen)
4153 {
4154 	char		ch;
4155 	boolean_t	ret = B_FALSE;
4156 	int		i;
4157 	int		tb;
4158 
4159 	for (i = 0, tb = 0; i < totallen; i++) {
4160 		ch = *str++;
4161 		if ((ch != ' ') && (ch != '\0') && (ch != '0'))
4162 			ret = B_TRUE;
4163 		if ((ch == ' ') || (ch == '\0'))
4164 			tb++;
4165 		else
4166 			tb = 0;
4167 	}
4168 
4169 	if (ret == B_TRUE) {
4170 		/* Atleast one non 0 or blank character. */
4171 		*retlen = totallen - tb;
4172 	} else {
4173 		*retlen = 0;
4174 	}
4175 }
4176 
4177 #ifndef lint
4178 void
clean_print(dev_info_t * dev,char * label,uint_t level,char * title,char * data,int len)4179 clean_print(dev_info_t *dev, char *label, uint_t level,
4180     char *title, char *data, int len)
4181 {
4182 	int	i;
4183 	char	buf[256];
4184 
4185 	(void) sprintf(buf, "%s:", title);
4186 	for (i = 0; i < len; i++) {
4187 		(void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
4188 	}
4189 	(void) sprintf(&buf[strlen(buf)], "\n");
4190 
4191 	dcd_log(dev, label, level, "%s", buf);
4192 }
4193 #endif /* Not lint */
4194 
4195 #ifndef lint
4196 /*
4197  * Print a piece of inquiry data- cleaned up for non-printable characters
4198  * and stopping at the first space character after the beginning of the
4199  * passed string;
4200  */
4201 
4202 void
inq_fill(char * p,int l,char * s)4203 inq_fill(char *p, int l, char *s)
4204 {
4205 	unsigned i = 0;
4206 	char c;
4207 
4208 	while (i++ < l) {
4209 		if ((c = *p++) < ' ' || c >= 0177) {
4210 			c = '*';
4211 		} else if (i != 1 && c == ' ') {
4212 			break;
4213 		}
4214 		*s++ = c;
4215 	}
4216 	*s++ = 0;
4217 }
4218 #endif /* Not lint */
4219 
4220 char *
dcd_sname(uchar_t status)4221 dcd_sname(uchar_t status)
4222 {
4223 	switch (status & STATUS_ATA_MASK) {
4224 	case STATUS_GOOD:
4225 		return ("good status");
4226 
4227 	case STATUS_ATA_BUSY:
4228 		return ("busy");
4229 
4230 	default:
4231 		return ("<unknown status>");
4232 	}
4233 }
4234 
4235 /* ARGSUSED0 */
4236 char *
dcd_rname(int reason)4237 dcd_rname(int reason)
4238 {
4239 	static char *rnames[] = {
4240 		"cmplt",
4241 		"incomplete",
4242 		"dma_derr",
4243 		"tran_err",
4244 		"reset",
4245 		"aborted",
4246 		"timeout",
4247 		"data_ovr",
4248 	};
4249 	if (reason > CMD_DATA_OVR) {
4250 		return ("<unknown reason>");
4251 	} else {
4252 		return (rnames[reason]);
4253 	}
4254 }
4255 
4256 
4257 
4258 /* ARGSUSED0 */
4259 int
dcd_check_wp(dev_t dev)4260 dcd_check_wp(dev_t dev)
4261 {
4262 
4263 	return (0);
4264 }
4265 
4266 /*
4267  * Create device error kstats
4268  */
4269 static int
dcd_create_errstats(struct dcd_disk * un,int instance)4270 dcd_create_errstats(struct dcd_disk *un, int instance)
4271 {
4272 
4273 	char kstatname[KSTAT_STRLEN];
4274 
4275 	if (un->un_errstats == (kstat_t *)0) {
4276 		(void) sprintf(kstatname, "dad%d,error", instance);
4277 		un->un_errstats = kstat_create("daderror", instance, kstatname,
4278 		    "device_error", KSTAT_TYPE_NAMED,
4279 		    sizeof (struct dcd_errstats)/ sizeof (kstat_named_t),
4280 		    KSTAT_FLAG_PERSISTENT);
4281 
4282 		if (un->un_errstats) {
4283 			struct dcd_errstats *dtp;
4284 
4285 			dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
4286 			kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
4287 			    KSTAT_DATA_UINT32);
4288 			kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
4289 			    KSTAT_DATA_UINT32);
4290 			kstat_named_init(&dtp->dcd_transerrs,
4291 			    "Transport Errors", KSTAT_DATA_UINT32);
4292 			kstat_named_init(&dtp->dcd_model, "Model",
4293 			    KSTAT_DATA_CHAR);
4294 			kstat_named_init(&dtp->dcd_revision, "Revision",
4295 			    KSTAT_DATA_CHAR);
4296 			kstat_named_init(&dtp->dcd_serial, "Serial No",
4297 			    KSTAT_DATA_CHAR);
4298 			kstat_named_init(&dtp->dcd_capacity, "Size",
4299 			    KSTAT_DATA_ULONGLONG);
4300 			kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
4301 			    KSTAT_DATA_UINT32);
4302 			kstat_named_init(&dtp->dcd_rq_ntrdy_err,
4303 			    "Device Not Ready", KSTAT_DATA_UINT32);
4304 			kstat_named_init(&dtp->dcd_rq_nodev_err, " No Device",
4305 			    KSTAT_DATA_UINT32);
4306 			kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
4307 			    KSTAT_DATA_UINT32);
4308 			kstat_named_init(&dtp->dcd_rq_illrq_err,
4309 			    "Illegal Request", KSTAT_DATA_UINT32);
4310 
4311 			un->un_errstats->ks_private = un;
4312 			un->un_errstats->ks_update = nulldev;
4313 			kstat_install(un->un_errstats);
4314 
4315 			(void) strncpy(&dtp->dcd_model.value.c[0],
4316 			    un->un_dcd->dcd_ident->dcd_model, 16);
4317 			(void) strncpy(&dtp->dcd_serial.value.c[0],
4318 			    un->un_dcd->dcd_ident->dcd_drvser, 16);
4319 			(void) strncpy(&dtp->dcd_revision.value.c[0],
4320 			    un->un_dcd->dcd_ident->dcd_fw, 8);
4321 			dtp->dcd_capacity.value.ui64 =
4322 			    (uint64_t)((uint64_t)un->un_diskcapacity *
4323 			    (uint64_t)un->un_lbasize);
4324 		}
4325 	}
4326 	return (0);
4327 }
4328 
4329 
4330 /*
4331  * This has been moved from DADA layer as this does not do anything other than
4332  * retrying the command when it is busy or it does not complete
4333  */
4334 int
dcd_poll(struct dcd_pkt * pkt)4335 dcd_poll(struct dcd_pkt *pkt)
4336 {
4337 	int	busy_count, rval = -1, savef;
4338 	clock_t	savet;
4339 	void	(*savec)();
4340 
4341 
4342 	/*
4343 	 * Save old flags
4344 	 */
4345 	savef = pkt->pkt_flags;
4346 	savec = pkt->pkt_comp;
4347 	savet = pkt->pkt_time;
4348 
4349 	pkt->pkt_flags |= FLAG_NOINTR;
4350 
4351 
4352 	/*
4353 	 * Set the Pkt_comp to NULL
4354 	 */
4355 
4356 	pkt->pkt_comp = 0;
4357 
4358 	/*
4359 	 * Set the Pkt time for the polled command
4360 	 */
4361 	if (pkt->pkt_time == 0) {
4362 		pkt->pkt_time = DCD_POLL_TIMEOUT;
4363 	}
4364 
4365 
4366 	/* Now transport the command */
4367 	for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
4368 		if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
4369 			if (pkt->pkt_reason == CMD_INCOMPLETE &&
4370 			    pkt->pkt_state == 0) {
4371 				delay(100);
4372 			} else if (pkt->pkt_reason  == CMD_CMPLT) {
4373 				rval = 0;
4374 				break;
4375 			}
4376 		}
4377 		if (rval == TRAN_BUSY)  {
4378 			delay(100);
4379 			continue;
4380 		}
4381 	}
4382 
4383 	pkt->pkt_flags = savef;
4384 	pkt->pkt_comp = savec;
4385 	pkt->pkt_time = savet;
4386 	return (rval);
4387 }
4388 
4389 
4390 void
dcd_translate(struct dadkio_status32 * statp,struct udcd_cmd * cmdp)4391 dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
4392 {
4393 	if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
4394 		statp->status = DADKIO_STAT_NOT_READY;
4395 	else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
4396 		statp->status = DADKIO_STAT_HARDWARE_ERROR;
4397 	else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
4398 		statp->status = DADKIO_STAT_SOFT_ERROR;
4399 	else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
4400 		/*
4401 		 * The error register is valid only when BSY and DRQ not set
4402 		 * Assumed that HBA has checked this before it gives the data
4403 		 */
4404 		if (cmdp->udcd_error_reg & ERR_AMNF)
4405 			statp->status = DADKIO_STAT_NOT_FORMATTED;
4406 		else if (cmdp->udcd_error_reg & ERR_TKONF)
4407 			statp->status = DADKIO_STAT_NOT_FORMATTED;
4408 		else if (cmdp->udcd_error_reg & ERR_ABORT)
4409 			statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
4410 		else if (cmdp->udcd_error_reg & ERR_IDNF)
4411 			statp->status = DADKIO_STAT_NOT_FORMATTED;
4412 		else if (cmdp->udcd_error_reg & ERR_UNC)
4413 			statp->status = DADKIO_STAT_BUS_ERROR;
4414 		else if (cmdp->udcd_error_reg & ERR_BBK)
4415 			statp->status = DADKIO_STAT_MEDIUM_ERROR;
4416 	} else
4417 		statp->status = DADKIO_STAT_NO_ERROR;
4418 }
4419 
4420 static void
dcd_flush_cache(struct dcd_disk * un)4421 dcd_flush_cache(struct dcd_disk *un)
4422 {
4423 	struct dcd_pkt *pkt;
4424 	int retry_count;
4425 
4426 
4427 	if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
4428 	    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4429 	    PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
4430 		return;
4431 	}
4432 
4433 	makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
4434 	    NO_DATA_XFER, 0);
4435 
4436 	/*
4437 	 * Send the command. There are chances it might fail on some
4438 	 * disks since it is not a mandatory command as per ata-4. Try
4439 	 * 3 times if it fails. The retry count has been randomly selected.
4440 	 * There is a need for retry since as per the spec FLUSH CACHE can fail
4441 	 * as a result of unrecoverable error encountered during execution
4442 	 * of writing data and subsequent command should continue flushing
4443 	 * cache.
4444 	 */
4445 	for (retry_count = 0; retry_count < 3; retry_count++) {
4446 		/*
4447 		 * Set the packet fields.
4448 		 */
4449 		pkt->pkt_comp = 0;
4450 		pkt->pkt_time = DCD_POLL_TIMEOUT;
4451 		pkt->pkt_flags |= FLAG_FORCENOINTR;
4452 		pkt->pkt_flags |= FLAG_NOINTR;
4453 		if (dcd_transport(pkt) == TRAN_ACCEPT) {
4454 			if (pkt->pkt_reason  == CMD_CMPLT) {
4455 				break;
4456 			}
4457 		}
4458 		/*
4459 		 * Note the wait time value of 100ms is same as in the
4460 		 * dcd_poll routine.
4461 		 */
4462 		drv_usecwait(1000000);
4463 	}
4464 	(void) dcd_destroy_pkt(pkt);
4465 }
4466 
4467 static int
dcd_send_lb_rw_cmd(dev_info_t * devi,void * bufaddr,diskaddr_t start_block,size_t reqlength,uchar_t cmd)4468 dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
4469     diskaddr_t start_block, size_t reqlength, uchar_t cmd)
4470 {
4471 	struct dcd_pkt *pkt;
4472 	struct buf *bp;
4473 	diskaddr_t real_addr = start_block;
4474 	size_t buffer_size = reqlength;
4475 	uchar_t command, tmp;
4476 	int i, rval = 0;
4477 	struct dcd_disk *un;
4478 
4479 	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4480 	if (un == NULL)
4481 		return (ENXIO);
4482 
4483 	bp = dcd_alloc_consistent_buf(ROUTE, (struct buf *)NULL,
4484 	    buffer_size, B_READ, NULL_FUNC, NULL);
4485 	if (!bp) {
4486 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4487 		    "no bp for disk label\n");
4488 		return (ENOMEM);
4489 	}
4490 
4491 	pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
4492 	    bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4493 	    PKT_CONSISTENT, NULL_FUNC, NULL);
4494 
4495 	if (!pkt) {
4496 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4497 		    "no memory for disk label\n");
4498 		dcd_free_consistent_buf(bp);
4499 		return (ENOMEM);
4500 	}
4501 
4502 	if (cmd == TG_READ) {
4503 		bzero(bp->b_un.b_addr, buffer_size);
4504 		tmp = DATA_READ;
4505 	} else {
4506 		bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
4507 		tmp = DATA_WRITE;
4508 	}
4509 
4510 	mutex_enter(DCD_MUTEX);
4511 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
4512 		if (cmd == TG_READ) {
4513 			command = ATA_READ_DMA;
4514 		} else {
4515 			command = ATA_WRITE_DMA;
4516 		}
4517 	} else {
4518 		if (cmd == TG_READ) {
4519 			if (un->un_dp->options & BLOCK_MODE)
4520 				command = ATA_READ_MULTIPLE;
4521 			else
4522 				command = ATA_READ;
4523 		} else {
4524 			if (un->un_dp->options & BLOCK_MODE)
4525 				command = ATA_READ_MULTIPLE;
4526 			else
4527 				command = ATA_WRITE;
4528 		}
4529 	}
4530 	mutex_exit(DCD_MUTEX);
4531 	(void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
4532 	    buffer_size, tmp, 0);
4533 
4534 	for (i = 0; i < 3; i++) {
4535 		if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
4536 		    (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
4537 		    (pkt->pkt_resid != 0)) {
4538 			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4539 			    "Status %x, state %x, resid %lx\n",
4540 			    SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
4541 			rval = EIO;
4542 		} else {
4543 			break;
4544 		}
4545 	}
4546 
4547 	if (rval != 0) {
4548 		dcd_destroy_pkt(pkt);
4549 		dcd_free_consistent_buf(bp);
4550 		return (EIO);
4551 	}
4552 
4553 	if (cmd == TG_READ) {
4554 		bcopy(bp->b_un.b_addr, bufaddr, reqlength);
4555 		rval = 0;
4556 	}
4557 
4558 	dcd_destroy_pkt(pkt);
4559 	dcd_free_consistent_buf(bp);
4560 	return (rval);
4561 }
4562 
dcd_compute_dk_capacity(struct dcd_device * devp,diskaddr_t * capacity)4563 static int dcd_compute_dk_capacity(struct dcd_device *devp,
4564     diskaddr_t *capacity)
4565 {
4566 	diskaddr_t cap;
4567 	diskaddr_t no_of_lbasec;
4568 
4569 	cap = devp->dcd_ident->dcd_fixcyls *
4570 	    devp->dcd_ident->dcd_heads *
4571 	    devp->dcd_ident->dcd_sectors;
4572 	no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4573 	no_of_lbasec = no_of_lbasec << 16;
4574 	no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];
4575 
4576 	if (no_of_lbasec > cap) {
4577 		cap = no_of_lbasec;
4578 	}
4579 
4580 	if (cap != ((uint32_t)-1))
4581 		*capacity = cap;
4582 	else
4583 		return (EINVAL);
4584 	return (0);
4585 }
4586 
4587 /*ARGSUSED5*/
4588 static int
dcd_lb_rdwr(dev_info_t * devi,uchar_t cmd,void * bufaddr,diskaddr_t start_block,size_t reqlength,void * tg_cookie)4589 dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
4590     diskaddr_t start_block, size_t reqlength, void *tg_cookie)
4591 {
4592 	if (cmd != TG_READ && cmd != TG_WRITE)
4593 		return (EINVAL);
4594 
4595 	return (dcd_send_lb_rw_cmd(devi, bufaddr, start_block,
4596 	    reqlength, cmd));
4597 }
4598 
4599 static int
dcd_lb_getphygeom(dev_info_t * devi,cmlb_geom_t * phygeomp)4600 dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp)
4601 {
4602 	struct dcd_device *devp;
4603 	uint32_t no_of_lbasec, capacity, calculated_cylinders;
4604 
4605 	devp = ddi_get_driver_private(devi);
4606 
4607 	if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
4608 		if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
4609 			phygeomp->g_ncyl = devp->dcd_ident->dcd_fixcyls - 2;
4610 			phygeomp->g_acyl = 2;
4611 			phygeomp->g_nhead = devp->dcd_ident->dcd_heads;
4612 			phygeomp->g_nsect = devp->dcd_ident->dcd_sectors;
4613 
4614 			no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4615 			no_of_lbasec = no_of_lbasec << 16;
4616 			no_of_lbasec = no_of_lbasec |
4617 			    devp->dcd_ident->dcd_addrsec[0];
4618 			capacity = devp->dcd_ident->dcd_fixcyls *
4619 			    devp->dcd_ident->dcd_heads *
4620 			    devp->dcd_ident->dcd_sectors;
4621 			if (no_of_lbasec > capacity) {
4622 				capacity = no_of_lbasec;
4623 				if (capacity > NUM_SECTORS_32G) {
4624 					/*
4625 					 * if the capacity is greater than 32G,
4626 					 * then 255 is the sectors per track.
4627 					 * This should be good until 128G disk
4628 					 * capacity, which is the current ATA-4
4629 					 * limitation.
4630 					 */
4631 					phygeomp->g_nsect = 255;
4632 				}
4633 
4634 				/*
4635 				 * If the disk capacity is >= 128GB then no. of
4636 				 * addressable sectors will be set to 0xfffffff
4637 				 * in the IDENTIFY info. In that case set the
4638 				 *  no. of pcyl to the Max. 16bit value.
4639 				 */
4640 
4641 				calculated_cylinders = (capacity) /
4642 				    (phygeomp->g_nhead * phygeomp->g_nsect);
4643 				if (calculated_cylinders >= USHRT_MAX) {
4644 					phygeomp->g_ncyl = USHRT_MAX - 2;
4645 				} else {
4646 					phygeomp->g_ncyl =
4647 					    calculated_cylinders - 2;
4648 				}
4649 			}
4650 
4651 			phygeomp->g_capacity = capacity;
4652 			phygeomp->g_intrlv = 0;
4653 			phygeomp->g_rpm = 5400;
4654 			phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz;
4655 
4656 			return (0);
4657 		} else
4658 			return (ENOTSUP);
4659 	} else {
4660 		return (EINVAL);
4661 	}
4662 }
4663 
4664 
4665 /*ARGSUSED3*/
4666 static int
dcd_lb_getinfo(dev_info_t * devi,int cmd,void * arg,void * tg_cookie)4667 dcd_lb_getinfo(dev_info_t *devi, int cmd,  void *arg, void *tg_cookie)
4668 {
4669 	struct dcd_disk *un;
4670 
4671 	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4672 
4673 	if (un == NULL)
4674 		return (ENXIO);
4675 
4676 	switch (cmd) {
4677 	case TG_GETPHYGEOM:
4678 		return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg));
4679 
4680 	case TG_GETVIRTGEOM:
4681 		return (-1);
4682 
4683 	case TG_GETCAPACITY:
4684 	case TG_GETBLOCKSIZE:
4685 		mutex_enter(DCD_MUTEX);
4686 		if (un->un_diskcapacity <= 0) {
4687 			mutex_exit(DCD_MUTEX);
4688 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4689 			    "invalid disk capacity\n");
4690 			return (EIO);
4691 		}
4692 		if (cmd == TG_GETCAPACITY)
4693 			*(diskaddr_t *)arg = un->un_diskcapacity;
4694 		else
4695 			*(uint32_t *)arg = DEV_BSIZE;
4696 
4697 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %x\n",
4698 		    un->un_diskcapacity);
4699 		mutex_exit(DCD_MUTEX);
4700 		return (0);
4701 
4702 	case TG_GETATTR:
4703 		mutex_enter(DCD_MUTEX);
4704 		*(tg_attribute_t *)arg = un->un_tgattribute;
4705 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4706 		    "media_is_writable %x\n",
4707 		    un->un_tgattribute.media_is_writable);
4708 		mutex_exit(DCD_MUTEX);
4709 		return (0);
4710 	default:
4711 		return (ENOTTY);
4712 	}
4713 }
4714