/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */

/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */

#ifdef DEBUG
#define	EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

int emul64_usetaskq	= 1;	/* set to zero for debugging */
int emul64debug		= 0;
#ifdef	EMUL64DEBUG
static int emul64_cdb_debug	= 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *, intptr_t, emul64_tgt_t **,
    emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *, emul64_tgt_t *,
    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *, emul64_tgt_t *,
    emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
    diskaddr_t start_block, size_t blkcnt, emul64_rng_overlap_t *overlapp,
    emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
    struct scsi_pkt *pkt);
#endif

#ifdef	_DDICT
static int	ddi_in_panic(void);
static int	ddi_in_panic() { return (0); }
#ifndef	SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION		14
#endif
#ifndef	SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY			0x01
#endif
#ifndef	SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL			0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *	The taskq facility is used to queue up SCSI start requests on a per
 *	controller basis.  If the maximum number of queued tasks is hit,
 *	taskq_ent_alloc() delays for a second, which adversely impacts our
 *	performance.  This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;
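
/*
 * As a sketch of how these might be tuned (illustrative values, not
 * shipped defaults): module globals such as these are conventionally
 * set from /etc/system, e.g.
 *
 *	set emul64:emul64_max_task = 32
 *	set emul64:emul64_task_nthreads = 4
 *
 * Per the comment above, emul64_task_nthreads may also be overridden
 * via emul64.conf.
 */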

/*
 * Local static data
 */
static void		*emul64_state = NULL;

/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
	DEVO_REV,			/* rev */
	0,				/* refcnt */
	emul64_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	emul64_attach,			/* attach */
	emul64_detach,			/* detach */
	nodev,				/* reset */
	&emul64_cbops,			/* char/block ops */
	NULL,				/* bus ops */
	NULL,				/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev - must be MODREV_1 */
	&modldrv,			/* ml_linkage */
	NULL				/* end of driver linkage */
};

int
_init(void)
{
	int	ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	emul64_bsd_init();

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		emul64_bsd_fini();
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

int
_fini(void)
{
	int	ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	emul64_bsd_fini();

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number, return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64	*foo;
	int		instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		foo = ddi_get_soft_state(emul64_state, instance);
		if (foo != NULL) {
			*result = (void *)foo->emul64_dip;
		} else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *)&tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (tran == NULL) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed");
		goto fail;
	}

	emul64->emul64_tran			= tran;
	emul64->emul64_dip			= dip;

	tran->tran_hba_private		= emul64;
	tran->tran_tgt_private		= NULL;
	tran->tran_tgt_init		= emul64_tran_tgt_init;
	tran->tran_tgt_probe		= scsi_hba_probe;
	tran->tran_tgt_free		= NULL;

	tran->tran_start		= emul64_scsi_start;
	tran->tran_abort		= emul64_scsi_abort;
	tran->tran_reset		= emul64_scsi_reset;
	tran->tran_getcap		= emul64_scsi_getcap;
	tran->tran_setcap		= emul64_scsi_setcap;
	tran->tran_init_pkt		= emul64_scsi_init_pkt;
	tran->tran_destroy_pkt		= emul64_scsi_destroy_pkt;
	tran->tran_dmafree		= emul64_scsi_dmafree;
	tran->tran_sync_pkt		= emul64_scsi_sync_pkt;
	tran->tran_reset_notify		= emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);

	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);

	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);

	/* get transport structure pointer from the dip */
	if ((tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip)) == NULL) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (emul64 == NULL) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);

		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));

		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64	*emul64;
	emul64_tgt_t	*tgt;
	char		**geo_vidpid = NULL;
	char		*geo, *vidpid;
	uint32_t	*geoip = NULL;
	uint_t		length;
	uint_t		length2;
	lldaddr_t	sector_count;
	char		prop_name[15];
	int		ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node; multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
	 * Check to see if transport to tgt,lun is already established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have a driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

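	/*
	 * Illustrative sketch (hypothetical values, not a shipped config):
	 * a driver.conf entry for target 0, lun 0 might pair a geometry
	 * property name with a vid/pid string, e.g.
	 *
	 *	targ_0_0="disk_geom","VENDOR  PRODUCT ";
	 *
	 * where "disk_geom" names the 6-element integer array property
	 * looked up below (sector count high/low, dtype, ncyl, nheads,
	 * nsect).
	 */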
	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.  Not
	 * all CPU types want them in this order, but lldaddr_t takes care
	 * of this for us.  We then pick up geometry (ncyl X nheads X nsect).
	 */
	sector_count._p._u	= *(geoip + 0);
	sector_count._p._l	= *(geoip + 1);
	/*
	 * On 32-bit platforms, clamp the sector count if it's greater
	 * than the allowable maximum.
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype	= *(geoip + 2);
	tgt->emul64_tgt_ncyls	= *(geoip + 3);
	tgt->emul64_tgt_nheads	= *(geoip + 4);
	tgt->emul64_tgt_nsect	= *(geoip + 5);

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	if (geoip)
		ddi_prop_free(geoip);
	if (geo_vidpid)
		ddi_prop_free(geo_vidpid);
	return (ret);
}

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description	 : Initializes the default target capabilities and
 *		   sync rates.
 *
 * Context	 : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
	uint16_t	cap, synch;
	int		i;

	cap = 0;
	synch = 0;
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *		   -1 if capability is not defined
 * Description	 : returns current capability value
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		rval = 1 << 24; /* limit to 16MB max transfer */
		break;
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = emul64->emul64_initiator_id;
		break;
	case SCSI_CAP_LINKED_CMDS:
		break;
	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *		   0 - capability could not be set to new value
 *		  -1 - no such capability
 *
 * Description	 : sets a capability for a target
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = -1;
		break;
	case SCSI_CAP_TOTAL_SECTORS:
		emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
		rval = TRUE;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = TRUE;
		break;
	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen, tgtlen, sizeof (struct emul64_cmd),
		    callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt		= pkt;
		sp->cmd_flags		= 0;
		sp->cmd_scblen		= statuslen;
		sp->cmd_cdblen		= cmdlen;
		sp->cmd_emul64		= emul64;
		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;
	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *)bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}

/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	/*
	 * emul64_scsi_dmafree inline to make things faster
	 */
	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/*
		 * Free the mapping.
		 */
		sp->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/*
	 * Free the pkt
	 */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description	 : free dvma resources
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description	 : sync dma
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64				*emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry	*p, *beforep;
	int					rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;
	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	int			rval	= TRAN_ACCEPT;
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %p", (void *)sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * Calculate the deadline from pkt_time.  Instead of multiplying by
	 * 100 (i.e. HZ), we multiply by 128 so we can shift and at the same
	 * time have a 28% grace period.  We ignore the rare case of
	 * pkt_time == 0 and deal with it in emul64_i_watch().
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);
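
	/*
	 * Worked example (illustrative): with pkt_time = 60 s and hz = 100,
	 * the deadline is 60 * 128 = 7680 ticks, i.e. about 76.8 s, which
	 * is the nominal 60 s timeout plus the 28% grace period.
	 */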

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = TASKQID_INVALID;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns TASKQID_INVALID), the task will be
			 * dispatched in sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == TASKQID_INVALID) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == TASKQID_INVALID) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}

ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t			max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are setup in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof (struct scsi_extended_sense), but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != NULL) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}

int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t		*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == (intptr_t)NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle the error injection request.  If error injection
	 * is requested, we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}
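
/*
 * Illustrative sketch of how a user-level test might drive the error
 * injection ioctl (hypothetical snippet; the structure layout comes
 * from the emul64 headers):
 *
 *	struct emul64_error_inj_data req;
 *	req.eccd_target = 0;
 *	req.eccd_lun = 0;
 *	req.eccd_inj_state = ERR_INJ_ENABLE_NODATA;
 *	req.eccd_sns_dlen = 0;
 *	(void) ioctl(fd, EMUL64_ERROR_INJECT, &req);
 *
 * When eccd_sns_dlen is nonzero, the sense bytes must immediately
 * follow the structure in the buffer passed to the ioctl, as the
 * second ddi_copyin() above expects.
 */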

int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data, return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled), continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	case SCMD_GET_CONFIGURATION:
	case 0x35:			/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	}
}

static void
emul64_pkt_comp(void *arg)
{
	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (tgt == NULL) {
		pkt->pkt_reason = CMD_TIMEOUT;
		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
		pkt->pkt_statistics = STAT_TIMEOUT;
	} else {
		pkt->pkt_reason = CMD_CMPLT;
		*pkt->pkt_scbp = STATUS_GOOD;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
		pkt->pkt_statistics = 0;
		emul64_handle_cmd(pkt);
	}
	scsi_hba_pkt_comp(pkt);
}

/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (1);
}

/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
	return (1);
}

static int
emul64_get_tgtrange(struct emul64 *emul64, intptr_t arg, emul64_tgt_t **tgtp,
    emul64_tgt_range_t *tgtr)
{
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed");
		return (EFAULT);
	}
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}

static int
emul64_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	struct emul64		*emul64;
	int			instance;
	int			rv = 0;
	emul64_tgt_range_t	tgtr;
	emul64_tgt_t		*tgt;

	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	case EMUL64_ERROR_INJECT:
		rv = emul64_error_inject_req(emul64, arg);
		break;
	default:
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}
	return (rv);
}

/* ARGSUSED */
static int
emul64_write_off(struct emul64 *emul64, emul64_tgt_t *tgt,
    emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	*nowrite;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_NONE) {
		/* Insert into list */
		*prev = nowrite;
		nowrite->emul64_nwnext = cur;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
	if (overlap == O_NONE) {
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count++;
			mutex_exit(&emul64_stats_mutex);
		}
	} else {
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
		    PRIx64 " overlaps 0x%llx,0x%" PRIx64,
		    nowrite->emul64_blocked.emul64_sb,
		    nowrite->emul64_blocked.emul64_blkcnt,
		    cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		emul64_nowrite_free(nowrite);
		return (EINVAL);
	}
	return (0);
}

/* ARGSUSED */
static int
emul64_write_on(struct emul64 *emul64, emul64_tgt_t *tgt,
    emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	int			rv = 0;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_SAME) {
		/* Remove from list */
		*prev = cur->emul64_nwnext;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64,
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt, diskaddr_t sb, size_t blkcnt,
    emul64_rng_overlap_t *overlap, emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *)range,
	    (void *)&nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *)nw, sizeof (*nw));
}

emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{
	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
		return (O_NONE);
	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
		return (O_SAME);
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
		return (O_SUBSET);
	}
	return (O_OVERLAP);
}
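
/*
 * Worked examples for emul64_overlap() (illustrative): with a blocked
 * range of sb = 100, cnt = 50 (i.e. blocks [100, 150)), a query of
 * (100, 50) is O_SAME, (110, 10) is O_SUBSET, (90, 20) is O_OVERLAP,
 * and (150, 10) or (50, 50) is O_NONE, since ranges that only touch
 * end-to-end do not overlap.
 */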

#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*VARARGS3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
	char	buf[256];
	va_list	ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	scsi_log(emul64 ? emul64->emul64_dip : NULL,
	    "emul64", level, "%s\n", buf);
}

#ifdef EMUL64DEBUG

static void
emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	static char	hex[]	= "0123456789abcdef";
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	uint8_t		*cdb	= pkt->pkt_cdbp;
	char		buf[256];
	char		*p;
	int		i;

	(void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
	    ddi_get_instance(emul64->emul64_dip),
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);

	*p++ = '[';
	for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
		if (i != 0)
			*p++ = ' ';
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '\n';
	*p = '\0';

	/* use an explicit format so buf cannot be misparsed as a format */
	cmn_err(CE_CONT, "%s", buf);
}
#endif	/* EMUL64DEBUG */