1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2008 QLogic Corporation */
23 
24 /*
25  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2008 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2008 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
99 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
100 static int ql_load_fcode(ql_adapter_state_t *, uint8_t *, uint32_t);
101 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
103     uint8_t);
104 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
105 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
106 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
107 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
113 static uint32_t ql_setup_led(ql_adapter_state_t *);
114 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
115 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
119 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
120 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
123 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
124 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
127 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
128 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
129 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
134 static void ql_restart_hba(ql_adapter_state_t *);
135 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
138 
139 /* ******************************************************************** */
140 /*			External IOCTL support.				*/
141 /* ******************************************************************** */
142 
143 /*
144  * ql_alloc_xioctl_resource
145  *	Allocates resources needed by module code.
146  *
147  * Input:
148  *	ha:		adapter state pointer.
149  *
150  * Returns:
151  *	SYS_ERRNO
152  *
153  * Context:
154  *	Kernel context.
155  */
156 int
157 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
158 {
159 	ql_xioctl_t	*xp;
160 
161 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
162 
163 	if (ha->xioctl != NULL) {
164 		QL_PRINT_9(CE_CONT, "(%d): already allocated exiting\n",
165 		    ha->instance);
166 		return (0);
167 	}
168 
169 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
170 	if (xp == NULL) {
171 		EL(ha, "failed, kmem_zalloc\n");
172 		return (ENOMEM);
173 	}
174 	ha->xioctl = xp;
175 
176 	/* Allocate AEN tracking buffer */
177 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
178 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
179 	if (xp->aen_tracking_queue == NULL) {
180 		EL(ha, "failed, kmem_zalloc-2\n");
181 		ql_free_xioctl_resource(ha);
182 		return (ENOMEM);
183 	}
184 
185 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
186 
187 	return (0);
188 }
189 
190 /*
191  * ql_free_xioctl_resource
192  *	Frees resources used by module code.
193  *
194  * Input:
195  *	ha:		adapter state pointer.
196  *
197  * Context:
198  *	Kernel context.
199  */
200 void
201 ql_free_xioctl_resource(ql_adapter_state_t *ha)
202 {
203 	ql_xioctl_t	*xp = ha->xioctl;
204 
205 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
206 
207 	if (xp == NULL) {
208 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
209 		return;
210 	}
211 
212 	if (xp->aen_tracking_queue != NULL) {
213 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
214 		    sizeof (EXT_ASYNC_EVENT));
215 		xp->aen_tracking_queue = NULL;
216 	}
217 
218 	kmem_free(xp, sizeof (ql_xioctl_t));
219 	ha->xioctl = NULL;
220 
221 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
222 }
223 
224 /*
225  * ql_xioctl
226  *	External IOCTL processing.
227  *
228  * Input:
229  *	ha:	adapter state pointer.
230  *	cmd:	function to perform
231  *	arg:	data type varies with request
232  *	mode:	flags
233  *	cred_p:	credentials pointer
234  *	rval_p:	pointer to result value
235  *
236  * Returns:
237  *	0:		success
238  *	ENXIO:		No such device or address
239  *	ENOPROTOOPT:	Protocol not available
240  *
241  * Context:
242  *	Kernel context.
243  */
244 /* ARGSUSED */
245 int
246 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
247     cred_t *cred_p, int *rval_p)
248 {
249 	int	rval;
250 
251 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance, cmd);
252 
253 	if (ha->xioctl == NULL) {
254 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
255 		return (ENXIO);
256 	}
257 
258 	switch (cmd) {
259 	case EXT_CC_QUERY:
260 	case EXT_CC_SEND_FCCT_PASSTHRU:
261 	case EXT_CC_REG_AEN:
262 	case EXT_CC_GET_AEN:
263 	case EXT_CC_SEND_SCSI_PASSTHRU:
264 	case EXT_CC_WWPN_TO_SCSIADDR:
265 	case EXT_CC_SEND_ELS_RNID:
266 	case EXT_CC_SET_DATA:
267 	case EXT_CC_GET_DATA:
268 	case EXT_CC_HOST_IDX:
269 	case EXT_CC_READ_NVRAM:
270 	case EXT_CC_UPDATE_NVRAM:
271 	case EXT_CC_READ_OPTION_ROM:
272 	case EXT_CC_READ_OPTION_ROM_EX:
273 	case EXT_CC_UPDATE_OPTION_ROM:
274 	case EXT_CC_UPDATE_OPTION_ROM_EX:
275 	case EXT_CC_GET_VPD:
276 	case EXT_CC_SET_VPD:
277 	case EXT_CC_LOOPBACK:
278 	case EXT_CC_GET_FCACHE:
279 	case EXT_CC_GET_FCACHE_EX:
280 	case EXT_CC_HOST_DRVNAME:
281 	case EXT_CC_GET_SFP_DATA:
282 	case EXT_CC_PORT_PARAM:
283 	case EXT_CC_GET_PCI_DATA:
284 	case EXT_CC_GET_FWEXTTRACE:
285 	case EXT_CC_GET_FWFCETRACE:
286 	case EXT_CC_GET_VP_CNT_ID:
287 	case EXT_CC_VPORT_CMD:
288 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
289 		break;
290 	default:
291 		/* function not supported. */
292 		EL(ha, "function=%d not supported\n", cmd);
293 		rval = ENOPROTOOPT;
294 	}
295 
296 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
297 
298 	return (rval);
299 }
300 
301 /*
302  * ql_sdm_ioctl
303  *	Provides ioctl functions for SAN/Device Management functions
304  *	AKA External Ioctl functions.
305  *
306  * Input:
307  *	ha:		adapter state pointer.
308  *	ioctl_code:	ioctl function to perform
309  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
310  *	mode:		flags
311  *
312  * Returns:
313  *	0:	success
314  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
315  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
316  *		copyout of EXT_IOCTL status info failed.
317  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
318  *	EBUSY:	Device busy
319  *
320  * Context:
321  *	Kernel context.
322  */
323 static int
324 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
325 {
326 	EXT_IOCTL		*cmd;
327 	int			rval;
328 	ql_adapter_state_t	*vha;
329 
330 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
331 
332 	/* Copy argument structure (EXT_IOCTL) from application land. */
333 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
334 	    ql_validate_signature)) != 0) {
335 		/*
336 		 * a non-zero value at this time means a problem getting
337 		 * the requested information from application land, just
338 		 * return the error code and hope for the best.
339 		 */
340 		EL(ha, "failed, sdm_setup\n");
341 		return (rval);
342 	}
343 
344 	/*
345 	 * Map the physical ha ptr (which the ioctl is called with)
346 	 * to the virtual ha that the caller is addressing.
347 	 */
348 	if (ha->flags & VP_ENABLED) {
349 		/*
350 		 * Special case: HbaSelect == 0 is physical ha
351 		 */
352 		if (cmd->HbaSelect != 0) {
353 			vha = ha->vp_next;
354 			while (vha != NULL) {
355 				if (vha->vp_index == cmd->HbaSelect) {
356 					ha = vha;
357 					break;
358 				}
359 				vha = vha->vp_next;
360 			}
361 
362 			/*
363 			 * If we can't find the specified vp index then
364 			 * we probably have an error (vp indexes shifting
365 			 * under our feet?).
366 			 */
367 			if (vha == NULL) {
368 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
369 				    cmd->HbaSelect);
370 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
371 				cmd->ResponseLen = 0;
372 				return (EFAULT);
373 			}
374 		}
375 	}
376 
377 	/*
378 	 * If driver is suspended or stalled, rtn BUSY so caller
379 	 * can try again at some later time
380 	 */
381 	if (ha->flags & ADAPTER_SUSPENDED ||
382 	    ha->task_daemon_flags & DRIVER_STALL) {
383 		EL(ha, "driver %s\n",
384 		    ha->flags & ADAPTER_SUSPENDED ? "suspended" : "stalled");
385 		cmd->Status = EXT_STATUS_BUSY;
386 		cmd->ResponseLen = 0;
387 		rval = EBUSY;
388 
389 		/* Return results to caller */
390 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
391 			EL(ha, "failed, sdm_return\n");
392 			rval = EFAULT;
393 		}
394 		return (rval);
395 	}
396 
397 	switch (ioctl_code) {
398 	case EXT_CC_QUERY_OS:
399 		ql_query(ha, cmd, mode);
400 		break;
401 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
402 		ql_fcct(ha, cmd, mode);
403 		break;
404 	case EXT_CC_REG_AEN_OS:
405 		ql_aen_reg(ha, cmd, mode);
406 		break;
407 	case EXT_CC_GET_AEN_OS:
408 		ql_aen_get(ha, cmd, mode);
409 		break;
410 	case EXT_CC_GET_DATA_OS:
411 		ql_get_host_data(ha, cmd, mode);
412 		break;
413 	case EXT_CC_SET_DATA_OS:
414 		ql_set_host_data(ha, cmd, mode);
415 		break;
416 	case EXT_CC_SEND_ELS_RNID_OS:
417 		ql_send_els_rnid(ha, cmd, mode);
418 		break;
419 	case EXT_CC_SCSI_PASSTHRU_OS:
420 		ql_scsi_passthru(ha, cmd, mode);
421 		break;
422 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
423 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
424 		break;
425 	case EXT_CC_HOST_IDX_OS:
426 		ql_host_idx(ha, cmd, mode);
427 		break;
428 	case EXT_CC_HOST_DRVNAME_OS:
429 		ql_host_drvname(ha, cmd, mode);
430 		break;
431 	case EXT_CC_READ_NVRAM_OS:
432 		ql_read_nvram(ha, cmd, mode);
433 		break;
434 	case EXT_CC_UPDATE_NVRAM_OS:
435 		ql_write_nvram(ha, cmd, mode);
436 		break;
437 	case EXT_CC_READ_OPTION_ROM_OS:
438 	case EXT_CC_READ_OPTION_ROM_EX_OS:
439 		ql_read_flash(ha, cmd, mode);
440 		break;
441 	case EXT_CC_UPDATE_OPTION_ROM_OS:
442 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
443 		ql_write_flash(ha, cmd, mode);
444 		break;
445 	case EXT_CC_LOOPBACK_OS:
446 		ql_diagnostic_loopback(ha, cmd, mode);
447 		break;
448 	case EXT_CC_GET_VPD_OS:
449 		ql_read_vpd(ha, cmd, mode);
450 		break;
451 	case EXT_CC_SET_VPD_OS:
452 		ql_write_vpd(ha, cmd, mode);
453 		break;
454 	case EXT_CC_GET_FCACHE_OS:
455 		ql_get_fcache(ha, cmd, mode);
456 		break;
457 	case EXT_CC_GET_FCACHE_EX_OS:
458 		ql_get_fcache_ex(ha, cmd, mode);
459 		break;
460 	case EXT_CC_GET_SFP_DATA_OS:
461 		ql_get_sfp(ha, cmd, mode);
462 		break;
463 	case EXT_CC_PORT_PARAM_OS:
464 		ql_port_param(ha, cmd, mode);
465 		break;
466 	case EXT_CC_GET_PCI_DATA_OS:
467 		ql_get_pci_data(ha, cmd, mode);
468 		break;
469 	case EXT_CC_GET_FWEXTTRACE_OS:
470 		ql_get_fwexttrace(ha, cmd, mode);
471 		break;
472 	case EXT_CC_GET_FWFCETRACE_OS:
473 		ql_get_fwfcetrace(ha, cmd, mode);
474 		break;
475 	case EXT_CC_MENLO_RESET:
476 		ql_menlo_reset(ha, cmd, mode);
477 		break;
478 	case EXT_CC_MENLO_GET_FW_VERSION:
479 		ql_menlo_get_fw_version(ha, cmd, mode);
480 		break;
481 	case EXT_CC_MENLO_UPDATE_FW:
482 		ql_menlo_update_fw(ha, cmd, mode);
483 		break;
484 	case EXT_CC_MENLO_MANAGE_INFO:
485 		ql_menlo_manage_info(ha, cmd, mode);
486 		break;
487 	case EXT_CC_GET_VP_CNT_ID_OS:
488 		ql_get_vp_cnt_id(ha, cmd, mode);
489 		break;
490 	case EXT_CC_VPORT_CMD_OS:
491 		ql_vp_ioctl(ha, cmd, mode);
492 		break;
493 	default:
494 		/* function not supported. */
495 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
496 
497 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
498 		cmd->ResponseLen = 0;
499 		break;
500 	}
501 
502 	/* Return results to caller */
503 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
504 		EL(ha, "failed, sdm_return\n");
505 		return (EFAULT);
506 	}
507 
508 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
509 
510 	return (0);
511 }
512 
513 /*
514  * ql_sdm_setup
515  *	Make a local copy of the EXT_IOCTL struct and validate it.
516  *
517  * Input:
518  *	ha:		adapter state pointer.
519  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
520  *	arg:		Address of application EXT_IOCTL cmd data
521  *	mode:		flags
522  *	val_sig:	Pointer to a function to validate the ioctl signature.
523  *
524  * Returns:
525  *	0:		success
526  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
527  *	EINVAL:		Invalid version, signature.
528  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
529  *
530  * Context:
531  *	Kernel context.
532  */
533 static int
534 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
535     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
536 {
537 	int		rval;
538 	EXT_IOCTL	*cmd;
539 
540 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
541 
542 	/* Allocate local memory for EXT_IOCTL. */
543 	*cmd_struct = NULL;
544 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
545 	if (cmd == NULL) {
546 		EL(ha, "failed, kmem_zalloc\n");
547 		return (ENOMEM);
548 	}
549 	/* Get argument structure. */
550 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
551 	if (rval != 0) {
552 		EL(ha, "failed, ddi_copyin\n");
553 		rval = EFAULT;
554 	} else {
555 		/*
556 		 * Check signature and the version.
557 		 * If either are not valid then neither is the
558 		 * structure so don't attempt to return any error status
559 		 * because we can't trust what caller's arg points to.
560 		 * Just return the errno.
561 		 */
562 		if (val_sig(cmd) == 0) {
563 			EL(ha, "failed, signature\n");
564 			rval = EINVAL;
565 		} else if (cmd->Version > EXT_VERSION) {
566 			EL(ha, "failed, version\n");
567 			rval = EINVAL;
568 		}
569 	}
570 
571 	if (rval == 0) {
572 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
573 		*cmd_struct = cmd;
574 		cmd->Status = EXT_STATUS_OK;
575 		cmd->DetailStatus = 0;
576 	} else {
577 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
578 	}
579 
580 	return (rval);
581 }
582 
583 /*
584  * ql_validate_signature
585  *	Validate the signature string for an external ioctl call.
586  *
587  * Input:
588  *	sg:	Pointer to EXT_IOCTL signature to validate.
589  *
590  * Returns:
591  *	B_TRUE:		Signature is valid.
592  *	B_FALSE:	Signature is NOT valid.
593  *
594  * Context:
595  *	Kernel context.
596  */
597 static boolean_t
598 ql_validate_signature(EXT_IOCTL *cmd_struct)
599 {
600 	/*
601 	 * Check signature.
602 	 *
603 	 * If signature is not valid then neither is the rest of
604 	 * the structure (e.g., can't trust it), so don't attempt
605 	 * to return any error status other than the errno.
606 	 */
607 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
608 		QL_PRINT_2(CE_CONT, "failed,\n");
609 		return (B_FALSE);
610 	}
611 
612 	return (B_TRUE);
613 }
614 
615 /*
616  * ql_sdm_return
617  *	Copies return data/status to application land for
618  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
619  *
620  * Input:
621  *	ha:		adapter state pointer.
622  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
623  *	ioctl_code:	ioctl function to perform
624  *	arg:		EXT_IOCTL cmd data in application land.
625  *	mode:		flags
626  *
627  * Returns:
628  *	0:	success
629  *	EFAULT:	Copy out error.
630  *
631  * Context:
632  *	Kernel context.
633  */
634 /* ARGSUSED */
635 static int
636 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
637 {
638 	int	rval = 0;
639 
640 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
641 
642 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
643 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
644 	    mode);
645 
646 	rval |= ddi_copyout((void *)&cmd->Status,
647 	    (void *)&(((EXT_IOCTL*)arg)->Status),
648 	    sizeof (cmd->Status), mode);
649 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
650 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
651 	    sizeof (cmd->DetailStatus), mode);
652 
653 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
654 
655 	if (rval != 0) {
656 		/* Some copyout operation failed */
657 		EL(ha, "failed, ddi_copyout\n");
658 		return (EFAULT);
659 	}
660 
661 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
662 
663 	return (0);
664 }
665 
666 /*
667  * ql_query
668  *	Performs all EXT_CC_QUERY functions.
669  *
670  * Input:
671  *	ha:	adapter state pointer.
672  *	cmd:	Local EXT_IOCTL cmd struct pointer.
673  *	mode:	flags.
674  *
675  * Returns:
676  *	None, request status indicated in cmd->Status.
677  *
678  * Context:
679  *	Kernel context.
680  */
681 static void
682 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
683 {
684 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance,
685 	    cmd->SubCode);
686 
687 	/* case off on command subcode */
688 	switch (cmd->SubCode) {
689 	case EXT_SC_QUERY_HBA_NODE:
690 		ql_qry_hba_node(ha, cmd, mode);
691 		break;
692 	case EXT_SC_QUERY_HBA_PORT:
693 		ql_qry_hba_port(ha, cmd, mode);
694 		break;
695 	case EXT_SC_QUERY_DISC_PORT:
696 		ql_qry_disc_port(ha, cmd, mode);
697 		break;
698 	case EXT_SC_QUERY_DISC_TGT:
699 		ql_qry_disc_tgt(ha, cmd, mode);
700 		break;
701 	case EXT_SC_QUERY_DRIVER:
702 		ql_qry_driver(ha, cmd, mode);
703 		break;
704 	case EXT_SC_QUERY_FW:
705 		ql_qry_fw(ha, cmd, mode);
706 		break;
707 	case EXT_SC_QUERY_CHIP:
708 		ql_qry_chip(ha, cmd, mode);
709 		break;
710 	case EXT_SC_QUERY_DISC_LUN:
711 	default:
712 		/* function not supported. */
713 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
714 		EL(ha, "failed, Unsupported Subcode=%xh\n",
715 		    cmd->SubCode);
716 		break;
717 	}
718 
719 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
720 }
721 
722 /*
723  * ql_qry_hba_node
724  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
725  *
726  * Input:
727  *	ha:	adapter state pointer.
728  *	cmd:	EXT_IOCTL cmd struct pointer.
729  *	mode:	flags.
730  *
731  * Returns:
732  *	None, request status indicated in cmd->Status.
733  *
734  * Context:
735  *	Kernel context.
736  */
737 static void
738 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
739 {
740 	EXT_HBA_NODE	tmp_node = {0};
741 	uint_t		len;
742 	caddr_t		bufp;
743 	ql_mbx_data_t	mr;
744 
745 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
746 
747 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
748 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
749 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
750 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
751 		    "Len=%xh\n", cmd->ResponseLen);
752 		cmd->ResponseLen = 0;
753 		return;
754 	}
755 
756 	/* fill in the values */
757 
758 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
759 	    EXT_DEF_WWN_NAME_SIZE);
760 
761 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
762 
763 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
764 
765 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
766 
767 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
768 
769 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
770 		size_t		verlen;
771 		uint16_t	w;
772 		char		*tmpptr;
773 
774 		verlen = strlen((char *)(tmp_node.DriverVersion));
775 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
776 			EL(ha, "failed, No room for fpga version string\n");
777 		} else {
778 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
779 			    (uint16_t *)
780 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
781 
782 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
783 			if (tmpptr == NULL) {
784 				EL(ha, "Unable to insert fpga version str\n");
785 			} else {
786 				(void) sprintf(tmpptr, "%d.%d",
787 				    ((w & 0xf0) >> 4), (w & 0x0f));
788 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
789 			}
790 		}
791 	}
792 	(void) ql_get_fw_version(ha, &mr);
793 
794 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
795 	    mr.mb[1], mr.mb[2], mr.mb[3]);
796 
797 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
798 		switch (mr.mb[6]) {
799 		case FWATTRIB_EF:
800 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
801 			break;
802 		case FWATTRIB_TP:
803 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
804 			break;
805 		case FWATTRIB_IP:
806 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
807 			break;
808 		case FWATTRIB_IPX:
809 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
810 			break;
811 		case FWATTRIB_FL:
812 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
813 			break;
814 		case FWATTRIB_FPX:
815 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
816 			break;
817 		default:
818 			break;
819 		}
820 	}
821 
822 	/* FCode version. */
823 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
824 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
825 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
826 	    (int *)&len) == DDI_PROP_SUCCESS) {
827 		if (len < EXT_DEF_MAX_STR_SIZE) {
828 			bcopy(bufp, tmp_node.OptRomVersion, len);
829 		} else {
830 			bcopy(bufp, tmp_node.OptRomVersion,
831 			    EXT_DEF_MAX_STR_SIZE - 1);
832 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
833 			    '\0';
834 		}
835 		kmem_free(bufp, len);
836 	} else {
837 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
838 	}
839 	tmp_node.PortCount = 1;
840 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
841 
842 	if (ddi_copyout((void *)&tmp_node,
843 	    (void *)(uintptr_t)(cmd->ResponseAdr),
844 	    sizeof (EXT_HBA_NODE), mode) != 0) {
845 		cmd->Status = EXT_STATUS_COPY_ERR;
846 		cmd->ResponseLen = 0;
847 		EL(ha, "failed, ddi_copyout\n");
848 	} else {
849 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
850 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
851 	}
852 }
853 
854 /*
855  * ql_qry_hba_port
856  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
857  *
858  * Input:
859  *	ha:	adapter state pointer.
860  *	cmd:	EXT_IOCTL cmd struct pointer.
861  *	mode:	flags.
862  *
863  * Returns:
864  *	None, request status indicated in cmd->Status.
865  *
866  * Context:
867  *	Kernel context.
868  */
869 static void
870 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
871 {
872 	ql_link_t	*link;
873 	ql_tgt_t	*tq;
874 	ql_mbx_data_t	mr;
875 	EXT_HBA_PORT	tmp_port = {0};
876 	int		rval;
877 	uint16_t	port_cnt, tgt_cnt, index;
878 
879 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
880 
881 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
882 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
883 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
884 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
885 		    cmd->ResponseLen);
886 		cmd->ResponseLen = 0;
887 		return;
888 	}
889 
890 	/* fill in the values */
891 
892 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
893 	    EXT_DEF_WWN_NAME_SIZE);
894 	tmp_port.Id[0] = 0;
895 	tmp_port.Id[1] = ha->d_id.b.domain;
896 	tmp_port.Id[2] = ha->d_id.b.area;
897 	tmp_port.Id[3] = ha->d_id.b.al_pa;
898 
899 	/* For now we are initiator only driver */
900 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
901 
902 	if (ha->task_daemon_flags & LOOP_DOWN) {
903 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
904 	} else if (DRIVER_SUSPENDED(ha)) {
905 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
906 	} else {
907 		tmp_port.State = EXT_DEF_HBA_OK;
908 	}
909 
910 	if (ha->flags & POINT_TO_POINT) {
911 		tmp_port.Mode = EXT_DEF_P2P_MODE;
912 	} else {
913 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
914 	}
915 	/*
916 	 * fill in the portspeed values.
917 	 *
918 	 * default to not yet negotiated state
919 	 */
920 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
921 
922 	if (tmp_port.State == EXT_DEF_HBA_OK) {
923 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
924 			mr.mb[1] = 0;
925 			mr.mb[2] = 0;
926 			rval = ql_data_rate(ha, &mr);
927 			if (rval != QL_SUCCESS) {
928 				EL(ha, "failed, data_rate=%xh\n", rval);
929 			} else {
930 				switch (mr.mb[1]) {
931 				case 0:
932 					tmp_port.PortSpeed =
933 					    EXT_DEF_PORTSPEED_1GBIT;
934 					break;
935 				case 1:
936 					tmp_port.PortSpeed =
937 					    EXT_DEF_PORTSPEED_2GBIT;
938 					break;
939 				case 3:
940 					tmp_port.PortSpeed =
941 					    EXT_DEF_PORTSPEED_4GBIT;
942 					break;
943 				case 4:
944 					tmp_port.PortSpeed =
945 					    EXT_DEF_PORTSPEED_8GBIT;
946 					break;
947 				default:
948 					tmp_port.PortSpeed =
949 					    EXT_DEF_PORTSPEED_UNKNOWN;
950 					EL(ha, "failed, data rate=%xh\n",
951 					    mr.mb[1]);
952 					break;
953 				}
954 			}
955 		} else {
956 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
957 		}
958 	}
959 
960 	/* Report all supported port speeds */
961 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
962 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
963 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
964 		    EXT_DEF_PORTSPEED_1GBIT);
965 		/*
966 		 * Correct supported speeds based on type of
967 		 * sfp that is present
968 		 */
969 		switch (ha->sfp_stat) {
970 		case 1:
971 			/* no sfp detected */
972 			break;
973 		case 2:
974 		case 4:
975 			/* 4GB sfp */
976 			tmp_port.PortSupportedSpeed &=
977 			    ~EXT_DEF_PORTSPEED_8GBIT;
978 			break;
979 		case 3:
980 		case 5:
981 			/* 8GB sfp */
982 			tmp_port.PortSupportedSpeed &=
983 			    ~EXT_DEF_PORTSPEED_1GBIT;
984 			break;
985 		default:
986 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
987 			break;
988 
989 		}
990 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
991 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
992 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
993 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
994 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
995 		    EXT_DEF_PORTSPEED_1GBIT);
996 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
997 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
998 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
999 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1000 	} else {
1001 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1002 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1003 	}
1004 	tmp_port.sfp_status = LSB(ha->sfp_stat);
1005 	port_cnt = 0;
1006 	tgt_cnt = 0;
1007 
1008 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1009 		for (link = ha->dev[index].first; link != NULL;
1010 		    link = link->next) {
1011 			tq = link->base_address;
1012 
1013 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1014 				continue;
1015 			}
1016 
1017 			port_cnt++;
1018 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1019 				tgt_cnt++;
1020 			}
1021 		}
1022 	}
1023 
1024 	tmp_port.DiscPortCount = port_cnt;
1025 	tmp_port.DiscTargetCount = tgt_cnt;
1026 
1027 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1028 
1029 	rval = ddi_copyout((void *)&tmp_port,
1030 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1031 	    sizeof (EXT_HBA_PORT), mode);
1032 	if (rval != 0) {
1033 		cmd->Status = EXT_STATUS_COPY_ERR;
1034 		cmd->ResponseLen = 0;
1035 		EL(ha, "failed, ddi_copyout\n");
1036 	} else {
1037 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1038 		QL_PRINT_9(CE_CONT, "(%d): exiting, ports=%d, targets=%d\n",
1039 		    ha->instance, port_cnt, tgt_cnt);
1040 	}
1041 }
1042 
1043 /*
1044  * ql_qry_disc_port
1045  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1046  *
1047  * Input:
1048  *	ha:	adapter state pointer.
1049  *	cmd:	EXT_IOCTL cmd struct pointer.
1050  *	mode:	flags.
1051  *
1052  *	cmd->Instance = Port instance in fcport chain.
1053  *
1054  * Returns:
1055  *	None, request status indicated in cmd->Status.
1056  *
1057  * Context:
1058  *	Kernel context.
1059  */
1060 static void
1061 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1062 {
1063 	EXT_DISC_PORT	tmp_port = {0};
1064 	ql_link_t	*link;
1065 	ql_tgt_t	*tq;
1066 	uint16_t	index;
1067 	uint16_t	inst = 0;
1068 
1069 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1070 
1071 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1072 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1073 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1074 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1075 		    cmd->ResponseLen);
1076 		cmd->ResponseLen = 0;
1077 		return;
1078 	}
1079 
1080 	for (link = NULL, index = 0;
1081 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1082 		for (link = ha->dev[index].first; link != NULL;
1083 		    link = link->next) {
1084 			tq = link->base_address;
1085 
1086 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1087 				continue;
1088 			}
1089 			if (inst != cmd->Instance) {
1090 				inst++;
1091 				continue;
1092 			}
1093 
1094 			/* fill in the values */
1095 			bcopy(tq->node_name, tmp_port.WWNN,
1096 			    EXT_DEF_WWN_NAME_SIZE);
1097 			bcopy(tq->port_name, tmp_port.WWPN,
1098 			    EXT_DEF_WWN_NAME_SIZE);
1099 
1100 			break;
1101 		}
1102 	}
1103 
1104 	if (link == NULL) {
1105 		/* no matching device */
1106 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1107 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1108 		cmd->ResponseLen = 0;
1109 		return;
1110 	}
1111 
1112 	tmp_port.Id[0] = 0;
1113 	tmp_port.Id[1] = tq->d_id.b.domain;
1114 	tmp_port.Id[2] = tq->d_id.b.area;
1115 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1116 
1117 	tmp_port.Type = 0;
1118 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1119 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1120 		    EXT_DEF_INITIATOR_DEV);
1121 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1122 		(void) ql_inq_scan(ha, tq, 1);
1123 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1124 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1125 	}
1126 
1127 	if (tq->flags & TQF_FABRIC_DEVICE) {
1128 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1129 	} else {
1130 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1131 	}
1132 
1133 	tmp_port.Status = 0;
1134 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1135 
1136 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1137 
1138 	if (ddi_copyout((void *)&tmp_port,
1139 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1140 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1141 		cmd->Status = EXT_STATUS_COPY_ERR;
1142 		cmd->ResponseLen = 0;
1143 		EL(ha, "failed, ddi_copyout\n");
1144 	} else {
1145 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1146 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1147 	}
1148 }
1149 
1150 /*
1151  * ql_qry_disc_tgt
1152  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1153  *
1154  * Input:
1155  *	ha:		adapter state pointer.
1156  *	cmd:		EXT_IOCTL cmd struct pointer.
1157  *	mode:		flags.
1158  *
1159  *	cmd->Instance = Port instance in fcport chain.
1160  *
1161  * Returns:
1162  *	None, request status indicated in cmd->Status.
1163  *
1164  * Context:
1165  *	Kernel context.
1166  */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;	/* running count of valid targets seen */

	QL_PRINT_9(CE_CONT, "(%d): entered, target=%d\n", ha->instance,
	    cmd->Instance);

	/* Caller's buffer must hold a whole EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	/*
	 * link doubles as the found/not-found sentinel: the outer loop
	 * only continues while link == NULL, so breaking out of the
	 * inner loop with link != NULL marks a successful match.
	 * Initiator-only devices are skipped (targets only).
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		EL(ha, "failed, not found target=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}
	/* 24-bit port ID, byte 0 reserved. */
	tmp_tgt.Id[0] = 0;
	tmp_tgt.Id[1] = tq->d_id.b.domain;
	tmp_tgt.Id[2] = tq->d_id.b.area;
	tmp_tgt.Id[3] = tq->d_id.b.al_pa;

	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);

	/*
	 * If the device is not already known to be a tape, run an
	 * inquiry scan (which may set TQF_TAPE_DEVICE) before the
	 * type classification below.
	 */
	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	}

	tmp_tgt.Type = 0;
	if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */

	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
	}
}
1261 
1262 /*
1263  * ql_qry_fw
1264  *	Performs EXT_SC_QUERY_FW subfunction.
1265  *
1266  * Input:
1267  *	ha:	adapter state pointer.
1268  *	cmd:	EXT_IOCTL cmd struct pointer.
1269  *	mode:	flags.
1270  *
1271  * Returns:
1272  *	None, request status indicated in cmd->Status.
1273  *
1274  * Context:
1275  *	Kernel context.
1276  */
1277 static void
1278 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1279 {
1280 	ql_mbx_data_t	mr;
1281 	EXT_FW		fw_info = {0};
1282 
1283 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1284 
1285 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1286 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1287 		cmd->DetailStatus = sizeof (EXT_FW);
1288 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1289 		    cmd->ResponseLen);
1290 		cmd->ResponseLen = 0;
1291 		return;
1292 	}
1293 
1294 	(void) ql_get_fw_version(ha, &mr);
1295 
1296 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1297 	    mr.mb[2], mr.mb[2]);
1298 
1299 	fw_info.Attrib = mr.mb[6];
1300 
1301 	if (ddi_copyout((void *)&fw_info, (void *)(uintptr_t)(cmd->ResponseAdr),
1302 	    sizeof (EXT_FW), mode) != 0) {
1303 		cmd->Status = EXT_STATUS_COPY_ERR;
1304 		cmd->ResponseLen = 0;
1305 		EL(ha, "failed, ddi_copyout\n");
1306 		return;
1307 	} else {
1308 		cmd->ResponseLen = sizeof (EXT_FW);
1309 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1310 	}
1311 }
1312 
1313 /*
1314  * ql_qry_chip
1315  *	Performs EXT_SC_QUERY_CHIP subfunction.
1316  *
1317  * Input:
1318  *	ha:	adapter state pointer.
1319  *	cmd:	EXT_IOCTL cmd struct pointer.
1320  *	mode:	flags.
1321  *
1322  * Returns:
1323  *	None, request status indicated in cmd->Status.
1324  *
1325  * Context:
1326  *	Kernel context.
1327  */
1328 static void
1329 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1330 {
1331 	EXT_CHIP	chip = {0};
1332 
1333 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1334 
1335 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1336 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1337 		cmd->DetailStatus = sizeof (EXT_CHIP);
1338 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1339 		    cmd->ResponseLen);
1340 		cmd->ResponseLen = 0;
1341 		return;
1342 	}
1343 
1344 	chip.VendorId = ha->ven_id;
1345 	chip.DeviceId = ha->device_id;
1346 	chip.SubVendorId = ha->subven_id;
1347 	chip.SubSystemId = ha->subsys_id;
1348 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1349 	chip.IoAddrLen = 0x100;
1350 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1351 	chip.MemAddrLen = 0x100;
1352 	chip.ChipRevID = ha->rev_id;
1353 
1354 	if (ddi_copyout((void *)&chip, (void *)(uintptr_t)(cmd->ResponseAdr),
1355 	    sizeof (EXT_CHIP), mode) != 0) {
1356 		cmd->Status = EXT_STATUS_COPY_ERR;
1357 		cmd->ResponseLen = 0;
1358 		EL(ha, "failed, ddi_copyout\n");
1359 	} else {
1360 		cmd->ResponseLen = sizeof (EXT_CHIP);
1361 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1362 	}
1363 }
1364 
1365 /*
1366  * ql_qry_driver
1367  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1368  *
1369  * Input:
1370  *	ha:	adapter state pointer.
1371  *	cmd:	EXT_IOCTL cmd struct pointer.
1372  *	mode:	flags.
1373  *
1374  * Returns:
1375  *	None, request status indicated in cmd->Status.
1376  *
1377  * Context:
1378  *	Kernel context.
1379  */
1380 static void
1381 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1382 {
1383 	EXT_DRIVER	qd = {0};
1384 
1385 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1386 
1387 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1388 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1389 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1390 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1391 		    cmd->ResponseLen);
1392 		cmd->ResponseLen = 0;
1393 		return;
1394 	}
1395 
1396 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1397 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1398 	qd.TargetsPerBus = (uint16_t)
1399 	    (CFG_IST(ha, (CFG_CTRL_2425|CFG_EXT_FW_INTERFACE)) ?
1400 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1401 	qd.LunsPerTarget = 2030;
1402 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1403 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1404 
1405 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1406 	    sizeof (EXT_DRIVER), mode) != 0) {
1407 		cmd->Status = EXT_STATUS_COPY_ERR;
1408 		cmd->ResponseLen = 0;
1409 		EL(ha, "failed, ddi_copyout\n");
1410 	} else {
1411 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1412 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1413 	}
1414 }
1415 
1416 /*
1417  * ql_fcct
1418  *	IOCTL management server FC-CT passthrough.
1419  *
1420  * Input:
1421  *	ha:	adapter state pointer.
1422  *	cmd:	User space CT arguments pointer.
1423  *	mode:	flags.
1424  *
1425  * Returns:
1426  *	None, request status indicated in cmd->Status.
1427  *
1428  * Context:
1429  *	Kernel context.
1430  */
1431 static void
1432 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1433 {
1434 	ql_mbx_iocb_t		*pkt;
1435 	ql_mbx_data_t		mr;
1436 	dma_mem_t		*dma_mem;
1437 	caddr_t			pld;
1438 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1439 	int			rval;
1440 	ql_ct_iu_preamble_t	*ct;
1441 	ql_xioctl_t		*xp = ha->xioctl;
1442 	ql_tgt_t		tq;
1443 	uint16_t		comp_status, loop_id;
1444 
1445 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1446 
1447 	/* Get CT argument structure. */
1448 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1449 		EL(ha, "failed, No switch\n");
1450 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1451 		cmd->ResponseLen = 0;
1452 		return;
1453 	}
1454 
1455 	if (DRIVER_SUSPENDED(ha)) {
1456 		EL(ha, "failed, LOOP_NOT_READY\n");
1457 		cmd->Status = EXT_STATUS_BUSY;
1458 		cmd->ResponseLen = 0;
1459 		return;
1460 	}
1461 
1462 	/* Login management server device. */
1463 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1464 		tq.d_id.b.al_pa = 0xfa;
1465 		tq.d_id.b.area = 0xff;
1466 		tq.d_id.b.domain = 0xff;
1467 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
1468 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1469 		    MANAGEMENT_SERVER_LOOP_ID);
1470 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1471 		if (rval != QL_SUCCESS) {
1472 			EL(ha, "failed, server login\n");
1473 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1474 			cmd->ResponseLen = 0;
1475 			return;
1476 		} else {
1477 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1478 		}
1479 	}
1480 
1481 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1482 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1483 
1484 	/* Allocate a DMA Memory Descriptor */
1485 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1486 	if (dma_mem == NULL) {
1487 		EL(ha, "failed, kmem_zalloc\n");
1488 		cmd->Status = EXT_STATUS_NO_MEMORY;
1489 		cmd->ResponseLen = 0;
1490 		return;
1491 	}
1492 	/* Determine maximum buffer size. */
1493 	if (cmd->RequestLen < cmd->ResponseLen) {
1494 		pld_byte_cnt = cmd->ResponseLen;
1495 	} else {
1496 		pld_byte_cnt = cmd->RequestLen;
1497 	}
1498 
1499 	/* Allocate command block. */
1500 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1501 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1502 	if (pkt == NULL) {
1503 		EL(ha, "failed, kmem_zalloc\n");
1504 		cmd->Status = EXT_STATUS_NO_MEMORY;
1505 		cmd->ResponseLen = 0;
1506 		return;
1507 	}
1508 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1509 
1510 	/* Get command payload data. */
1511 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1512 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1513 		EL(ha, "failed, get_buffer_data\n");
1514 		kmem_free(pkt, pkt_size);
1515 		cmd->Status = EXT_STATUS_COPY_ERR;
1516 		cmd->ResponseLen = 0;
1517 		return;
1518 	}
1519 
1520 	/* Get DMA memory for the IOCB */
1521 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1522 	    MEM_RING_ALIGN) != QL_SUCCESS) {
1523 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1524 		    "alloc failed", QL_NAME, ha->instance);
1525 		kmem_free(pkt, pkt_size);
1526 		kmem_free(dma_mem, sizeof (dma_mem_t));
1527 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1528 		cmd->ResponseLen = 0;
1529 		return;
1530 	}
1531 
1532 	/* Copy out going payload data to IOCB DMA buffer. */
1533 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1534 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1535 
1536 	/* Sync IOCB DMA buffer. */
1537 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1538 	    DDI_DMA_SYNC_FORDEV);
1539 
1540 	/*
1541 	 * Setup IOCB
1542 	 */
1543 	ct = (ql_ct_iu_preamble_t *)pld;
1544 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1545 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1546 		pkt->ms24.entry_count = 1;
1547 
1548 		/* Set loop ID */
1549 		pkt->ms24.n_port_hdl = (uint16_t)
1550 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1551 		    LE_16(SNS_24XX_HDL) :
1552 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1553 
1554 		/* Set ISP command timeout. */
1555 		pkt->ms24.timeout = LE_16(120);
1556 
1557 		/* Set cmd/response data segment counts. */
1558 		pkt->ms24.cmd_dseg_count = LE_16(1);
1559 		pkt->ms24.resp_dseg_count = LE_16(1);
1560 
1561 		/* Load ct cmd byte count. */
1562 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1563 
1564 		/* Load ct rsp byte count. */
1565 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1566 
1567 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1568 
1569 		/* Load MS command entry data segments. */
1570 		*long_ptr++ = (uint32_t)
1571 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1572 		*long_ptr++ = (uint32_t)
1573 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1574 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1575 
1576 		/* Load MS response entry data segments. */
1577 		*long_ptr++ = (uint32_t)
1578 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1579 		*long_ptr++ = (uint32_t)
1580 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1581 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1582 
1583 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1584 		    sizeof (ql_mbx_iocb_t));
1585 
1586 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1587 		if (comp_status == CS_DATA_UNDERRUN) {
1588 			if ((BE_16(ct->max_residual_size)) == 0) {
1589 				comp_status = CS_COMPLETE;
1590 			}
1591 		}
1592 
1593 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1594 		    0) {
1595 			EL(ha, "failed, I/O timeout or "
1596 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1597 			    pkt->sts24.entry_status,
1598 			    pkt->sts24.scsi_status_l, rval);
1599 			kmem_free(pkt, pkt_size);
1600 			ql_free_dma_resource(ha, dma_mem);
1601 			kmem_free(dma_mem, sizeof (dma_mem_t));
1602 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1603 			cmd->ResponseLen = 0;
1604 			return;
1605 		}
1606 	} else {
1607 		pkt->ms.entry_type = MS_TYPE;
1608 		pkt->ms.entry_count = 1;
1609 
1610 		/* Set loop ID */
1611 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1612 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1613 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1614 			pkt->ms.loop_id_l = LSB(loop_id);
1615 			pkt->ms.loop_id_h = MSB(loop_id);
1616 		} else {
1617 			pkt->ms.loop_id_h = LSB(loop_id);
1618 		}
1619 
1620 		/* Set ISP command timeout. */
1621 		pkt->ms.timeout = LE_16(120);
1622 
1623 		/* Set data segment counts. */
1624 		pkt->ms.cmd_dseg_count_l = 1;
1625 		pkt->ms.total_dseg_count = LE_16(2);
1626 
1627 		/* Response total byte count. */
1628 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1629 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1630 
1631 		/* Command total byte count. */
1632 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1633 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1634 
1635 		/* Load command/response data segments. */
1636 		pkt->ms.dseg_0_address[0] = (uint32_t)
1637 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1638 		pkt->ms.dseg_0_address[1] = (uint32_t)
1639 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1640 		pkt->ms.dseg_1_address[0] = (uint32_t)
1641 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1642 		pkt->ms.dseg_1_address[1] = (uint32_t)
1643 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1644 
1645 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1646 		    sizeof (ql_mbx_iocb_t));
1647 
1648 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1649 		if (comp_status == CS_DATA_UNDERRUN) {
1650 			if ((BE_16(ct->max_residual_size)) == 0) {
1651 				comp_status = CS_COMPLETE;
1652 			}
1653 		}
1654 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1655 			EL(ha, "failed, I/O timeout or "
1656 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1657 			kmem_free(pkt, pkt_size);
1658 			ql_free_dma_resource(ha, dma_mem);
1659 			kmem_free(dma_mem, sizeof (dma_mem_t));
1660 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1661 			cmd->ResponseLen = 0;
1662 			return;
1663 		}
1664 	}
1665 
1666 	/* Sync in coming DMA buffer. */
1667 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1668 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1669 	/* Copy in coming DMA data. */
1670 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1671 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1672 	    DDI_DEV_AUTOINCR);
1673 
1674 	/* Copy response payload from DMA buffer to application. */
1675 	if (cmd->ResponseLen != 0) {
1676 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1677 		    cmd->ResponseLen);
1678 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1679 
1680 		/* Send response payload. */
1681 		if (ql_send_buffer_data(pld,
1682 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1683 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1684 			EL(ha, "failed, send_buffer_data\n");
1685 			cmd->Status = EXT_STATUS_COPY_ERR;
1686 			cmd->ResponseLen = 0;
1687 		}
1688 	}
1689 
1690 	kmem_free(pkt, pkt_size);
1691 	ql_free_dma_resource(ha, dma_mem);
1692 	kmem_free(dma_mem, sizeof (dma_mem_t));
1693 
1694 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1695 }
1696 
1697 /*
1698  * ql_aen_reg
1699  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1700  *
1701  * Input:
1702  *	ha:	adapter state pointer.
1703  *	cmd:	EXT_IOCTL cmd struct pointer.
1704  *	mode:	flags.
1705  *
1706  * Returns:
1707  *	None, request status indicated in cmd->Status.
1708  *
1709  * Context:
1710  *	Kernel context.
1711  */
1712 static void
1713 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1714 {
1715 	EXT_REG_AEN	reg_struct;
1716 	int		rval = 0;
1717 	ql_xioctl_t	*xp = ha->xioctl;
1718 
1719 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1720 
1721 	rval = ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr), &reg_struct,
1722 	    cmd->RequestLen, mode);
1723 
1724 	if (rval == 0) {
1725 		if (reg_struct.Enable) {
1726 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1727 		} else {
1728 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1729 			/* Empty the queue. */
1730 			INTR_LOCK(ha);
1731 			xp->aen_q_head = 0;
1732 			xp->aen_q_tail = 0;
1733 			INTR_UNLOCK(ha);
1734 		}
1735 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1736 	} else {
1737 		cmd->Status = EXT_STATUS_COPY_ERR;
1738 		EL(ha, "failed, ddi_copyin\n");
1739 	}
1740 }
1741 
1742 /*
1743  * ql_aen_get
1744  *	IOCTL management server Asynchronous Event Record Transfer.
1745  *
1746  * Input:
1747  *	ha:	adapter state pointer.
1748  *	cmd:	EXT_IOCTL cmd struct pointer.
1749  *	mode:	flags.
1750  *
1751  * Returns:
1752  *	None, request status indicated in cmd->Status.
1753  *
1754  * Context:
1755  *	Kernel context.
1756  */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;	/* live tracking queue (ring buffer) */
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];	/* local snapshot */
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* Caller must be able to accept the entire queue in one shot. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/*
	 * The snapshot is taken under INTR_LOCK so the interrupt-side
	 * producer (ql_enqueue_aen) cannot modify the ring mid-copy.
	 */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	/* Drain from head to tail, skipping already-emptied slots. */
	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			i = 0;	/* wrap around the ring */
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
	}
}
1828 
1829 /*
1830  * ql_enqueue_aen
1831  *
1832  * Input:
1833  *	ha:		adapter state pointer.
1834  *	event_code:	async event code of the event to add to queue.
1835  *	payload:	event payload for the queue.
1836  *	INTR_LOCK must be already obtained.
1837  *
1838  * Context:
1839  *	Interrupt or Kernel context, no mailbox commands allowed.
1840  */
1841 void
1842 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1843 {
1844 	uint8_t			new_entry;	/* index to current entry */
1845 	uint16_t		*mbx;
1846 	EXT_ASYNC_EVENT		*aen_queue;
1847 	ql_xioctl_t		*xp = ha->xioctl;
1848 
1849 	QL_PRINT_9(CE_CONT, "(%d): entered, event_code=%d\n", ha->instance,
1850 	    event_code);
1851 
1852 	if (xp == NULL) {
1853 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1854 		return;
1855 	}
1856 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1857 
1858 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1859 		/* Need to change queue pointers to make room. */
1860 
1861 		/* Increment tail for adding new entry. */
1862 		xp->aen_q_tail++;
1863 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1864 			xp->aen_q_tail = 0;
1865 		}
1866 		if (xp->aen_q_head == xp->aen_q_tail) {
1867 			/*
1868 			 * We're overwriting the oldest entry, so need to
1869 			 * update the head pointer.
1870 			 */
1871 			xp->aen_q_head++;
1872 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1873 				xp->aen_q_head = 0;
1874 			}
1875 		}
1876 	}
1877 
1878 	new_entry = xp->aen_q_tail;
1879 	aen_queue[new_entry].AsyncEventCode = event_code;
1880 
1881 	/* Update payload */
1882 	if (payload != NULL) {
1883 		switch (event_code) {
1884 		case MBA_LIP_OCCURRED:
1885 		case MBA_LOOP_UP:
1886 		case MBA_LOOP_DOWN:
1887 		case MBA_LIP_F8:
1888 		case MBA_LIP_RESET:
1889 		case MBA_PORT_UPDATE:
1890 			break;
1891 		case MBA_RSCN_UPDATE:
1892 			mbx = (uint16_t *)payload;
1893 			/* al_pa */
1894 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1895 			    LSB(mbx[2]);
1896 			/* area */
1897 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1898 			    MSB(mbx[2]);
1899 			/* domain */
1900 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1901 			    LSB(mbx[1]);
1902 			/* save in big endian */
1903 			BIG_ENDIAN_24(&aen_queue[new_entry].
1904 			    Payload.RSCN.RSCNInfo[0]);
1905 
1906 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1907 			    MSB(mbx[1]);
1908 
1909 			break;
1910 		default:
1911 			/* Not supported */
1912 			EL(ha, "failed, event code not supported=%xh\n",
1913 			    event_code);
1914 			aen_queue[new_entry].AsyncEventCode = 0;
1915 			break;
1916 		}
1917 	}
1918 
1919 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1920 }
1921 
1922 /*
1923  * ql_scsi_passthru
1924  *	IOCTL SCSI passthrough.
1925  *
1926  * Input:
1927  *	ha:	adapter state pointer.
1928  *	cmd:	User space SCSI command pointer.
1929  *	mode:	flags.
1930  *
1931  * Returns:
1932  *	None, request status indicated in cmd->Status.
1933  *
1934  * Context:
1935  *	Kernel context.
1936  */
1937 static void
1938 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1939 {
1940 	ql_mbx_iocb_t		*pkt;
1941 	ql_mbx_data_t		mr;
1942 	dma_mem_t		*dma_mem;
1943 	caddr_t			pld;
1944 	uint32_t		pkt_size, pld_size;
1945 	uint16_t		qlnt, retries, cnt, cnt2;
1946 	uint8_t			*name;
1947 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1948 	EXT_SCSI_PASSTHRU	*usp_req;
1949 	int			rval;
1950 	union _passthru {
1951 		EXT_SCSI_PASSTHRU	sp_cmd;
1952 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1953 	} pt_req;		/* Passthru request */
1954 	uint32_t		status, sense_sz = 0;
1955 	ql_tgt_t		*tq = NULL;
1956 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1957 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1958 
1959 	/* SCSI request struct for SCSI passthrough IOs. */
1960 	struct {
1961 		uint16_t	lun;
1962 		uint16_t	sense_length;	/* Sense buffer size */
1963 		size_t		resid;		/* Residual */
1964 		uint8_t		*cdbp;		/* Requestor's CDB */
1965 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1966 		uint8_t		cdb_len;	/* Requestor's CDB length */
1967 		uint8_t		direction;
1968 	} scsi_req;
1969 
1970 	struct {
1971 		uint8_t		*rsp_info;
1972 		uint8_t		*req_sense_data;
1973 		uint32_t	residual_length;
1974 		uint32_t	rsp_info_length;
1975 		uint32_t	req_sense_length;
1976 		uint16_t	comp_status;
1977 		uint8_t		state_flags_l;
1978 		uint8_t		state_flags_h;
1979 		uint8_t		scsi_status_l;
1980 		uint8_t		scsi_status_h;
1981 	} sts;
1982 
1983 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1984 
1985 	/* Verify Sub Code and set cnt to needed request size. */
1986 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
1987 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
1988 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
1989 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
1990 	} else {
1991 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
1992 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
1993 		cmd->ResponseLen = 0;
1994 		return;
1995 	}
1996 
1997 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1998 	if (dma_mem == NULL) {
1999 		EL(ha, "failed, kmem_zalloc\n");
2000 		cmd->Status = EXT_STATUS_NO_MEMORY;
2001 		cmd->ResponseLen = 0;
2002 		return;
2003 	}
2004 	/*  Verify the size of and copy in the passthru request structure. */
2005 	if (cmd->RequestLen != pld_size) {
2006 		/* Return error */
2007 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2008 		    cmd->RequestLen, pld_size);
2009 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2010 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2011 		cmd->ResponseLen = 0;
2012 		return;
2013 	}
2014 
2015 	if (ddi_copyin((const void *)(uintptr_t)(uintptr_t)cmd->RequestAdr,
2016 	    &pt_req, pld_size, mode) != 0) {
2017 		EL(ha, "failed, ddi_copyin\n");
2018 		cmd->Status = EXT_STATUS_COPY_ERR;
2019 		cmd->ResponseLen = 0;
2020 		return;
2021 	}
2022 
2023 	/*
2024 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2025 	 * request data structure.
2026 	 */
2027 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2028 		scsi_req.lun = sp_req->TargetAddr.Lun;
2029 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2030 		scsi_req.cdbp = &sp_req->Cdb[0];
2031 		scsi_req.cdb_len = sp_req->CdbLength;
2032 		scsi_req.direction = sp_req->Direction;
2033 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2034 		scsi_req.u_sense = &usp_req->SenseData[0];
2035 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2036 
2037 		qlnt = QLNT_PORT;
2038 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2039 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2040 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2041 		tq = ql_find_port(ha, name, qlnt);
2042 	} else {
2043 		/*
2044 		 * Must be FC PASSTHRU, verified above.
2045 		 */
2046 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2047 			qlnt = QLNT_PORT;
2048 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2049 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2050 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2051 			    ha->instance, cmd->SubCode, name[0], name[1],
2052 			    name[2], name[3], name[4], name[5], name[6],
2053 			    name[7]);
2054 			tq = ql_find_port(ha, name, qlnt);
2055 		} else if (fc_req->FCScsiAddr.DestType ==
2056 		    EXT_DEF_DESTTYPE_WWNN) {
2057 			qlnt = QLNT_NODE;
2058 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2059 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2060 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2061 			    ha->instance, cmd->SubCode, name[0], name[1],
2062 			    name[2], name[3], name[4], name[5], name[6],
2063 			    name[7]);
2064 			tq = ql_find_port(ha, name, qlnt);
2065 		} else if (fc_req->FCScsiAddr.DestType ==
2066 		    EXT_DEF_DESTTYPE_PORTID) {
2067 			qlnt = QLNT_PID;
2068 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2069 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2070 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2071 			    name[0], name[1], name[2]);
2072 			tq = ql_find_port(ha, name, qlnt);
2073 		} else {
2074 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2075 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2076 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2077 			cmd->ResponseLen = 0;
2078 			return;
2079 		}
2080 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2081 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2082 		scsi_req.cdbp = &sp_req->Cdb[0];
2083 		scsi_req.cdb_len = sp_req->CdbLength;
2084 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2085 		scsi_req.u_sense = &ufc_req->SenseData[0];
2086 		scsi_req.direction = fc_req->Direction;
2087 	}
2088 
2089 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2090 		EL(ha, "failed, fc_port not found\n");
2091 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2092 		cmd->ResponseLen = 0;
2093 		return;
2094 	}
2095 
2096 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2097 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2098 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2099 		cmd->ResponseLen = 0;
2100 		return;
2101 	}
2102 
2103 	/* Allocate command block. */
2104 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2105 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2106 	    cmd->ResponseLen) {
2107 		pld_size = cmd->ResponseLen;
2108 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2109 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2110 		if (pkt == NULL) {
2111 			EL(ha, "failed, kmem_zalloc\n");
2112 			cmd->Status = EXT_STATUS_NO_MEMORY;
2113 			cmd->ResponseLen = 0;
2114 			return;
2115 		}
2116 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2117 
2118 		/* Get DMA memory for the IOCB */
2119 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2120 		    MEM_DATA_ALIGN) != QL_SUCCESS) {
2121 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2122 			    "alloc failed", QL_NAME, ha->instance);
2123 			kmem_free(pkt, pkt_size);
2124 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2125 			cmd->ResponseLen = 0;
2126 			return;
2127 		}
2128 
2129 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2130 			scsi_req.direction = (uint8_t)
2131 			    (CFG_IST(ha, CFG_CTRL_2425) ?
2132 			    CF_RD : CF_DATA_IN | CF_STAG);
2133 		} else {
2134 			scsi_req.direction = (uint8_t)
2135 			    (CFG_IST(ha, CFG_CTRL_2425) ?
2136 			    CF_WR : CF_DATA_OUT | CF_STAG);
2137 			cmd->ResponseLen = 0;
2138 
2139 			/* Get command payload. */
2140 			if (ql_get_buffer_data(
2141 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2142 			    pld, pld_size, mode) != pld_size) {
2143 				EL(ha, "failed, get_buffer_data\n");
2144 				cmd->Status = EXT_STATUS_COPY_ERR;
2145 
2146 				kmem_free(pkt, pkt_size);
2147 				ql_free_dma_resource(ha, dma_mem);
2148 				kmem_free(dma_mem, sizeof (dma_mem_t));
2149 				return;
2150 			}
2151 
2152 			/* Copy out going data to DMA buffer. */
2153 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2154 			    (uint8_t *)dma_mem->bp, pld_size,
2155 			    DDI_DEV_AUTOINCR);
2156 
2157 			/* Sync DMA buffer. */
2158 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2159 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2160 		}
2161 	} else {
2162 		scsi_req.direction = (uint8_t)
2163 		    (CFG_IST(ha, CFG_CTRL_2425) ? 0 : CF_STAG);
2164 		cmd->ResponseLen = 0;
2165 
2166 		pkt_size = sizeof (ql_mbx_iocb_t);
2167 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2168 		if (pkt == NULL) {
2169 			EL(ha, "failed, kmem_zalloc-2\n");
2170 			cmd->Status = EXT_STATUS_NO_MEMORY;
2171 			return;
2172 		}
2173 		pld = NULL;
2174 		pld_size = 0;
2175 	}
2176 
2177 	/* retries = ha->port_down_retry_count; */
2178 	retries = 1;
2179 	cmd->Status = EXT_STATUS_OK;
2180 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2181 
2182 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2183 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2184 
2185 	do {
2186 		if (DRIVER_SUSPENDED(ha)) {
2187 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2188 			break;
2189 		}
2190 
2191 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2192 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2193 			pkt->cmd24.entry_count = 1;
2194 
2195 			/* Set LUN number */
2196 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2197 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2198 
2199 			/* Set N_port handle */
2200 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2201 
2202 			/* Set VP Index */
2203 			pkt->cmd24.vp_index = ha->vp_index;
2204 
2205 			/* Set target ID */
2206 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2207 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2208 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2209 
2210 			/* Set ISP command timeout. */
2211 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2212 
2213 			/* Load SCSI CDB */
2214 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2215 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2216 			    DDI_DEV_AUTOINCR);
2217 			for (cnt = 0; cnt < MAX_CMDSZ;
2218 			    cnt = (uint16_t)(cnt + 4)) {
2219 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2220 				    + cnt, 4);
2221 			}
2222 
2223 			/* Set tag queue control flags */
2224 			pkt->cmd24.task = TA_STAG;
2225 
2226 			if (pld_size) {
2227 				/* Set transfer direction. */
2228 				pkt->cmd24.control_flags = scsi_req.direction;
2229 
2230 				/* Set data segment count. */
2231 				pkt->cmd24.dseg_count = LE_16(1);
2232 
2233 				/* Load total byte count. */
2234 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2235 
2236 				/* Load data descriptor. */
2237 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2238 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2239 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2240 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2241 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2242 			}
2243 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2244 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2245 			pkt->cmd3.entry_count = 1;
2246 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2247 				pkt->cmd3.target_l = LSB(tq->loop_id);
2248 				pkt->cmd3.target_h = MSB(tq->loop_id);
2249 			} else {
2250 				pkt->cmd3.target_h = LSB(tq->loop_id);
2251 			}
2252 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2253 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2254 			pkt->cmd3.control_flags_l = scsi_req.direction;
2255 			pkt->cmd3.timeout = LE_16(15);
2256 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2257 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2258 			}
2259 			if (pld_size) {
2260 				pkt->cmd3.dseg_count = LE_16(1);
2261 				pkt->cmd3.byte_count = LE_32(pld_size);
2262 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2263 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2264 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2265 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2266 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2267 			}
2268 		} else {
2269 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2270 			pkt->cmd.entry_count = 1;
2271 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2272 				pkt->cmd.target_l = LSB(tq->loop_id);
2273 				pkt->cmd.target_h = MSB(tq->loop_id);
2274 			} else {
2275 				pkt->cmd.target_h = LSB(tq->loop_id);
2276 			}
2277 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2278 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2279 			pkt->cmd.control_flags_l = scsi_req.direction;
2280 			pkt->cmd.timeout = LE_16(15);
2281 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2282 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2283 			}
2284 			if (pld_size) {
2285 				pkt->cmd.dseg_count = LE_16(1);
2286 				pkt->cmd.byte_count = LE_32(pld_size);
2287 				pkt->cmd.dseg_0_address = (uint32_t)
2288 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2289 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2290 			}
2291 		}
2292 		/* Go issue command and wait for completion. */
2293 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2294 		QL_DUMP_9(pkt, 8, pkt_size);
2295 
2296 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2297 
2298 		if (pld_size) {
2299 			/* Sync in coming DMA buffer. */
2300 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2301 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2302 			/* Copy in coming DMA data. */
2303 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2304 			    (uint8_t *)dma_mem->bp, pld_size,
2305 			    DDI_DEV_AUTOINCR);
2306 		}
2307 
2308 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2309 			pkt->sts24.entry_status = (uint8_t)
2310 			    (pkt->sts24.entry_status & 0x3c);
2311 		} else {
2312 			pkt->sts.entry_status = (uint8_t)
2313 			    (pkt->sts.entry_status & 0x7e);
2314 		}
2315 
2316 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2317 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2318 			    pkt->sts.entry_status, tq->d_id.b24);
2319 			status = QL_FUNCTION_PARAMETER_ERROR;
2320 		}
2321 
2322 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2323 		    LE_16(pkt->sts24.comp_status) :
2324 		    LE_16(pkt->sts.comp_status));
2325 
2326 		/*
2327 		 * We have verified about all the request that can be so far.
2328 		 * Now we need to start verification of our ability to
2329 		 * actually issue the CDB.
2330 		 */
2331 		if (DRIVER_SUSPENDED(ha)) {
2332 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2333 			break;
2334 		} else if (status == QL_SUCCESS &&
2335 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2336 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2337 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2338 			if (tq->flags & TQF_FABRIC_DEVICE) {
2339 				rval = ql_login_fport(ha, tq, tq->loop_id,
2340 				    LFF_NO_PLOGI, &mr);
2341 				if (rval != QL_SUCCESS) {
2342 					EL(ha, "failed, login_fport=%xh, "
2343 					    "d_id=%xh\n", rval, tq->d_id.b24);
2344 				}
2345 			} else {
2346 				rval = ql_login_lport(ha, tq, tq->loop_id,
2347 				    LLF_NONE);
2348 				if (rval != QL_SUCCESS) {
2349 					EL(ha, "failed, login_lport=%xh, "
2350 					    "d_id=%xh\n", rval, tq->d_id.b24);
2351 				}
2352 			}
2353 		} else {
2354 			break;
2355 		}
2356 
2357 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2358 
2359 	} while (retries--);
2360 
2361 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2362 		/* Cannot issue command now, maybe later */
2363 		EL(ha, "failed, suspended\n");
2364 		kmem_free(pkt, pkt_size);
2365 		ql_free_dma_resource(ha, dma_mem);
2366 		kmem_free(dma_mem, sizeof (dma_mem_t));
2367 		cmd->Status = EXT_STATUS_SUSPENDED;
2368 		cmd->ResponseLen = 0;
2369 		return;
2370 	}
2371 
2372 	if (status != QL_SUCCESS) {
2373 		/* Command error */
2374 		EL(ha, "failed, I/O\n");
2375 		kmem_free(pkt, pkt_size);
2376 		ql_free_dma_resource(ha, dma_mem);
2377 		kmem_free(dma_mem, sizeof (dma_mem_t));
2378 		cmd->Status = EXT_STATUS_ERR;
2379 		cmd->DetailStatus = status;
2380 		cmd->ResponseLen = 0;
2381 		return;
2382 	}
2383 
2384 	/* Setup status. */
2385 	if (CFG_IST(ha, CFG_CTRL_2425)) {
2386 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2387 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2388 
2389 		/* Setup residuals. */
2390 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2391 
2392 		/* Setup state flags. */
2393 		sts.state_flags_l = pkt->sts24.state_flags_l;
2394 		sts.state_flags_h = pkt->sts24.state_flags_h;
2395 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2396 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2397 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2398 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2399 		} else {
2400 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2401 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2402 			    SF_GOT_STATUS);
2403 		}
2404 		if (scsi_req.direction & CF_WR) {
2405 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2406 			    SF_DATA_OUT);
2407 		} else if (scsi_req.direction & CF_RD) {
2408 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2409 			    SF_DATA_IN);
2410 		}
2411 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2412 
2413 		/* Setup FCP response info. */
2414 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2415 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2416 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2417 		for (cnt = 0; cnt < sts.rsp_info_length;
2418 		    cnt = (uint16_t)(cnt + 4)) {
2419 			ql_chg_endian(sts.rsp_info + cnt, 4);
2420 		}
2421 
2422 		/* Setup sense data. */
2423 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2424 			sts.req_sense_length =
2425 			    LE_32(pkt->sts24.fcp_sense_length);
2426 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2427 			    SF_ARQ_DONE);
2428 		} else {
2429 			sts.req_sense_length = 0;
2430 		}
2431 		sts.req_sense_data =
2432 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2433 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2434 		    (uintptr_t)sts.req_sense_data);
2435 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2436 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2437 		}
2438 	} else {
2439 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2440 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2441 
2442 		/* Setup residuals. */
2443 		sts.residual_length = LE_32(pkt->sts.residual_length);
2444 
2445 		/* Setup state flags. */
2446 		sts.state_flags_l = pkt->sts.state_flags_l;
2447 		sts.state_flags_h = pkt->sts.state_flags_h;
2448 
2449 		/* Setup FCP response info. */
2450 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2451 		    LE_16(pkt->sts.rsp_info_length) : 0;
2452 		sts.rsp_info = &pkt->sts.rsp_info[0];
2453 
2454 		/* Setup sense data. */
2455 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2456 		    LE_16(pkt->sts.req_sense_length) : 0;
2457 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2458 	}
2459 
2460 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2461 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2462 
2463 	switch (sts.comp_status) {
2464 	case CS_INCOMPLETE:
2465 	case CS_ABORTED:
2466 	case CS_DEVICE_UNAVAILABLE:
2467 	case CS_PORT_UNAVAILABLE:
2468 	case CS_PORT_LOGGED_OUT:
2469 	case CS_PORT_CONFIG_CHG:
2470 	case CS_PORT_BUSY:
2471 	case CS_LOOP_DOWN_ABORT:
2472 		cmd->Status = EXT_STATUS_BUSY;
2473 		break;
2474 	case CS_RESET:
2475 	case CS_QUEUE_FULL:
2476 		cmd->Status = EXT_STATUS_ERR;
2477 		break;
2478 	case CS_TIMEOUT:
2479 		cmd->Status = EXT_STATUS_ERR;
2480 		break;
2481 	case CS_DATA_OVERRUN:
2482 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2483 		break;
2484 	case CS_DATA_UNDERRUN:
2485 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2486 		break;
2487 	}
2488 
2489 	/*
2490 	 * If non data transfer commands fix tranfer counts.
2491 	 */
2492 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2493 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2494 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2495 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2496 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2497 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2498 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2499 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2500 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2501 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2502 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2503 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2504 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2505 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2506 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2507 		/*
2508 		 * Non data transfer command, clear sts_entry residual
2509 		 * length.
2510 		 */
2511 		sts.residual_length = 0;
2512 		cmd->ResponseLen = 0;
2513 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2514 			sts.comp_status = CS_COMPLETE;
2515 			cmd->Status = EXT_STATUS_OK;
2516 		}
2517 	} else {
2518 		cmd->ResponseLen = pld_size;
2519 	}
2520 
2521 	/* Correct ISP completion status */
2522 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2523 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2524 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2525 		    ha->instance);
2526 		scsi_req.resid = 0;
2527 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2528 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2529 		    ha->instance);
2530 		scsi_req.resid = sts.residual_length;
2531 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2532 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2533 
2534 			cmd->ResponseLen = (uint32_t)
2535 			    (pld_size - scsi_req.resid);
2536 		} else {
2537 			EL(ha, "failed, Transfer ERROR\n");
2538 			cmd->Status = EXT_STATUS_ERR;
2539 			cmd->ResponseLen = 0;
2540 		}
2541 	} else {
2542 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2543 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2544 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2545 		    sts.scsi_status_l);
2546 
2547 		scsi_req.resid = pld_size;
2548 		/*
2549 		 * Handle residual count on SCSI check
2550 		 * condition.
2551 		 *
2552 		 * - If Residual Under / Over is set, use the
2553 		 *   Residual Transfer Length field in IOCB.
2554 		 * - If Residual Under / Over is not set, and
2555 		 *   Transferred Data bit is set in State Flags
2556 		 *   field of IOCB, report residual value of 0
2557 		 *   (you may want to do this for tape
2558 		 *   Write-type commands only). This takes care
2559 		 *   of logical end of tape problem and does
2560 		 *   not break Unit Attention.
2561 		 * - If Residual Under / Over is not set, and
2562 		 *   Transferred Data bit is not set in State
2563 		 *   Flags, report residual value equal to
2564 		 *   original data transfer length.
2565 		 */
2566 		if (sts.scsi_status_l & STATUS_CHECK) {
2567 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2568 			cmd->DetailStatus = sts.scsi_status_l;
2569 			if (sts.scsi_status_h &
2570 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2571 				scsi_req.resid = sts.residual_length;
2572 			} else if (sts.state_flags_h &
2573 			    STATE_XFERRED_DATA) {
2574 				scsi_req.resid = 0;
2575 			}
2576 		}
2577 	}
2578 
2579 	if (sts.scsi_status_l & STATUS_CHECK &&
2580 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2581 	    sts.req_sense_length) {
2582 		/*
2583 		 * Check condition with vaild sense data flag set and sense
2584 		 * length != 0
2585 		 */
2586 		if (sts.req_sense_length > scsi_req.sense_length) {
2587 			sense_sz = scsi_req.sense_length;
2588 		} else {
2589 			sense_sz = sts.req_sense_length;
2590 		}
2591 
2592 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2593 		    tq->d_id.b24);
2594 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2595 
2596 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2597 		    (size_t)sense_sz, mode) != 0) {
2598 			EL(ha, "failed, request sense ddi_copyout\n");
2599 		}
2600 
2601 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2602 		cmd->DetailStatus = sts.scsi_status_l;
2603 	}
2604 
2605 	/* Copy response payload from DMA buffer to application. */
2606 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2607 	    cmd->ResponseLen != 0) {
2608 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2609 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2610 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2611 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2612 
2613 		/* Send response payload. */
2614 		if (ql_send_buffer_data(pld,
2615 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2616 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2617 			EL(ha, "failed, send_buffer_data\n");
2618 			cmd->Status = EXT_STATUS_COPY_ERR;
2619 			cmd->ResponseLen = 0;
2620 		}
2621 	}
2622 
2623 	if (cmd->Status != EXT_STATUS_OK) {
2624 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2625 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2626 	} else {
2627 		/*EMPTY*/
2628 		QL_PRINT_9(CE_CONT, "(%d): exiting, ResponseLen=%d\n",
2629 		    ha->instance, cmd->ResponseLen);
2630 	}
2631 
2632 	kmem_free(pkt, pkt_size);
2633 	ql_free_dma_resource(ha, dma_mem);
2634 	kmem_free(dma_mem, sizeof (dma_mem_t));
2635 }
2636 
2637 /*
2638  * ql_wwpn_to_scsiaddr
2639  *
2640  * Input:
2641  *	ha:	adapter state pointer.
2642  *	cmd:	EXT_IOCTL cmd struct pointer.
2643  *	mode:	flags.
2644  *
2645  * Context:
2646  *	Kernel context.
2647  */
2648 static void
2649 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2650 {
2651 	int		status;
2652 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2653 	EXT_SCSI_ADDR	*tmp_addr;
2654 	ql_tgt_t	*tq;
2655 
2656 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2657 
2658 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2659 		/* Return error */
2660 		EL(ha, "incorrect RequestLen\n");
2661 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2662 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2663 		return;
2664 	}
2665 
2666 	status = ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr), wwpn,
2667 	    cmd->RequestLen, mode);
2668 
2669 	if (status != 0) {
2670 		cmd->Status = EXT_STATUS_COPY_ERR;
2671 		EL(ha, "failed, ddi_copyin\n");
2672 		return;
2673 	}
2674 
2675 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2676 
2677 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2678 		/* no matching device */
2679 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2680 		EL(ha, "failed, device not found\n");
2681 		return;
2682 	}
2683 
2684 	/* Copy out the IDs found.  For now we can only return target ID. */
2685 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2686 
2687 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2688 
2689 	if (status != 0) {
2690 		cmd->Status = EXT_STATUS_COPY_ERR;
2691 		EL(ha, "failed, ddi_copyout\n");
2692 	} else {
2693 		cmd->Status = EXT_STATUS_OK;
2694 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2695 	}
2696 }
2697 
2698 /*
2699  * ql_host_idx
2700  *	Gets host order index.
2701  *
2702  * Input:
2703  *	ha:	adapter state pointer.
2704  *	cmd:	EXT_IOCTL cmd struct pointer.
2705  *	mode:	flags.
2706  *
2707  * Returns:
2708  *	None, request status indicated in cmd->Status.
2709  *
2710  * Context:
2711  *	Kernel context.
2712  */
2713 static void
2714 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2715 {
2716 	uint16_t	idx;
2717 
2718 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2719 
2720 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2721 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2722 		cmd->DetailStatus = sizeof (uint16_t);
2723 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2724 		cmd->ResponseLen = 0;
2725 		return;
2726 	}
2727 
2728 	idx = (uint16_t)ha->instance;
2729 
2730 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2731 	    sizeof (uint16_t), mode) != 0) {
2732 		cmd->Status = EXT_STATUS_COPY_ERR;
2733 		cmd->ResponseLen = 0;
2734 		EL(ha, "failed, ddi_copyout\n");
2735 	} else {
2736 		cmd->ResponseLen = sizeof (uint16_t);
2737 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2738 	}
2739 }
2740 
2741 /*
2742  * ql_host_drvname
2743  *	Gets host driver name
2744  *
2745  * Input:
2746  *	ha:	adapter state pointer.
2747  *	cmd:	EXT_IOCTL cmd struct pointer.
2748  *	mode:	flags.
2749  *
2750  * Returns:
2751  *	None, request status indicated in cmd->Status.
2752  *
2753  * Context:
2754  *	Kernel context.
2755  */
2756 static void
2757 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2758 {
2759 
2760 	char		drvname[] = QL_NAME;
2761 	uint32_t	qlnamelen;
2762 
2763 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2764 
2765 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2766 
2767 	if (cmd->ResponseLen < qlnamelen) {
2768 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2769 		cmd->DetailStatus = qlnamelen;
2770 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2771 		    cmd->ResponseLen, qlnamelen);
2772 		cmd->ResponseLen = 0;
2773 		return;
2774 	}
2775 
2776 	if (ddi_copyout((void *)&drvname, (void *)(uintptr_t)(cmd->ResponseAdr),
2777 	    qlnamelen, mode) != 0) {
2778 		cmd->Status = EXT_STATUS_COPY_ERR;
2779 		cmd->ResponseLen = 0;
2780 		EL(ha, "failed, ddi_copyout\n");
2781 	} else {
2782 		cmd->ResponseLen = qlnamelen-1;
2783 	}
2784 
2785 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2786 }
2787 
2788 /*
2789  * ql_read_nvram
2790  *	Get NVRAM contents.
2791  *
2792  * Input:
2793  *	ha:	adapter state pointer.
2794  *	cmd:	EXT_IOCTL cmd struct pointer.
2795  *	mode:	flags.
2796  *
2797  * Returns:
2798  *	None, request status indicated in cmd->Status.
2799  *
2800  * Context:
2801  *	Kernel context.
2802  */
2803 static void
2804 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2805 {
2806 	uint32_t	nv_size;
2807 
2808 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2809 
2810 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2811 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2812 	if (cmd->ResponseLen < nv_size) {
2813 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2814 		cmd->DetailStatus = nv_size;
2815 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2816 		    cmd->ResponseLen);
2817 		cmd->ResponseLen = 0;
2818 		return;
2819 	}
2820 
2821 	/* Get NVRAM data. */
2822 	if (ql_nv_util_dump(ha,
2823 	    (void *)(uintptr_t)(cmd->ResponseAdr), mode) != 0) {
2824 		cmd->Status = EXT_STATUS_COPY_ERR;
2825 		cmd->ResponseLen = 0;
2826 		EL(ha, "failed, copy error\n");
2827 	} else {
2828 		cmd->ResponseLen = nv_size;
2829 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2830 	}
2831 }
2832 
2833 /*
2834  * ql_write_nvram
2835  *	Loads NVRAM contents.
2836  *
2837  * Input:
2838  *	ha:	adapter state pointer.
2839  *	cmd:	EXT_IOCTL cmd struct pointer.
2840  *	mode:	flags.
2841  *
2842  * Returns:
2843  *	None, request status indicated in cmd->Status.
2844  *
2845  * Context:
2846  *	Kernel context.
2847  */
2848 static void
2849 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2850 {
2851 	uint32_t	nv_size;
2852 
2853 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2854 
2855 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2856 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2857 	if (cmd->RequestLen < nv_size) {
2858 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2859 		cmd->DetailStatus = sizeof (nvram_t);
2860 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2861 		    cmd->RequestLen);
2862 		return;
2863 	}
2864 
2865 	/* Load NVRAM data. */
2866 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2867 	    mode) != 0) {
2868 		cmd->Status = EXT_STATUS_COPY_ERR;
2869 		EL(ha, "failed, copy error\n");
2870 	} else {
2871 		/*EMPTY*/
2872 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2873 	}
2874 }
2875 
2876 /*
2877  * ql_write_vpd
2878  *	Loads VPD contents.
2879  *
2880  * Input:
2881  *	ha:	adapter state pointer.
2882  *	cmd:	EXT_IOCTL cmd struct pointer.
2883  *	mode:	flags.
2884  *
2885  * Returns:
2886  *	None, request status indicated in cmd->Status.
2887  *
2888  * Context:
2889  *	Kernel context.
2890  */
2891 static void
2892 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2893 {
2894 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2895 
2896 	int32_t		rval = 0;
2897 
2898 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
2899 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2900 		EL(ha, "failed, invalid request for HBA\n");
2901 		return;
2902 	}
2903 
2904 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2905 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2906 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2907 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2908 		    cmd->RequestLen);
2909 		return;
2910 	}
2911 
2912 	/* Load VPD data. */
2913 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2914 	    mode)) != 0) {
2915 		cmd->Status = EXT_STATUS_COPY_ERR;
2916 		cmd->DetailStatus = rval;
2917 		EL(ha, "failed, errno=%x\n", rval);
2918 	} else {
2919 		/*EMPTY*/
2920 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2921 	}
2922 }
2923 
2924 /*
2925  * ql_read_vpd
2926  *	Dumps VPD contents.
2927  *
2928  * Input:
2929  *	ha:	adapter state pointer.
2930  *	cmd:	EXT_IOCTL cmd struct pointer.
2931  *	mode:	flags.
2932  *
2933  * Returns:
2934  *	None, request status indicated in cmd->Status.
2935  *
2936  * Context:
2937  *	Kernel context.
2938  */
2939 static void
2940 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2941 {
2942 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2943 
2944 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
2945 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2946 		EL(ha, "failed, invalid request for HBA\n");
2947 		return;
2948 	}
2949 
2950 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2951 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2952 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2953 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2954 		    cmd->ResponseLen);
2955 		return;
2956 	}
2957 
2958 	/* Dump VPD data. */
2959 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2960 	    mode)) != 0) {
2961 		cmd->Status = EXT_STATUS_COPY_ERR;
2962 		EL(ha, "failed,\n");
2963 	} else {
2964 		/*EMPTY*/
2965 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2966 	}
2967 }
2968 
2969 /*
2970  * ql_get_fcache
2971  *	Dumps flash cache contents.
2972  *
2973  * Input:
2974  *	ha:	adapter state pointer.
2975  *	cmd:	EXT_IOCTL cmd struct pointer.
2976  *	mode:	flags.
2977  *
2978  * Returns:
2979  *	None, request status indicated in cmd->Status.
2980  *
2981  * Context:
2982  *	Kernel context.
2983  */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Hold the cache lock for the entire walk of ha->fcache. */
	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response size: older ISPs return a single 100-byte
	 * slot; 24xx/25xx return four (BIOS/FCODE/EFI/FW).
	 */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/*
	 * bsize is reused from here on: it now accumulates the number
	 * of bytes actually copied out (reported via DetailStatus).
	 * boff is the fixed 100-byte slot offset in the user buffer.
	 */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 * (BIOS, FCODE, EFI), each copied into its own 100-byte slot;
	 * a found type is cleared from 'types' so it is fetched once.
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* At most 100 bytes of each image are returned. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr +
			    boff), cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, exiting\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance a full slot even if cpsize < 100. */
			boff += 100;
			bsize += cpsize;
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Copy at most 100 bytes of FW past the PCI header. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr +
		    300), cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, exiting\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): a full slot (100) is counted here even
		 * when cpsize < 100 — presumably intentional for the
		 * fixed legacy layout; confirm against consumers.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* Total bytes copied out across all slots. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
3083 
3084 /*
3085  * ql_get_fcache_ex
3086  *	Dumps flash cache contents.
3087  *
3088  * Input:
3089  *	ha:	adapter state pointer.
3090  *	cmd:	EXT_IOCTL cmd struct pointer.
3091  *	mode:	flags.
3092  *
3093  * Returns:
3094  *	None, request status indicated in cmd->Status.
3095  *
3096  * Context:
3097  *	Kernel context.
3098  */
3099 static void
3100 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3101 {
3102 	uint32_t	bsize = 0;
3103 	uint32_t	boff = 0;
3104 	ql_fcache_t	*fptr;
3105 
3106 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3107 
3108 	CACHE_LOCK(ha);
3109 	if (ha->fcache == NULL) {
3110 		CACHE_UNLOCK(ha);
3111 		cmd->Status = EXT_STATUS_ERR;
3112 		EL(ha, "failed, adapter fcache not setup\n");
3113 		return;
3114 	}
3115 
3116 	/* Make sure user passed enough buffer space */
3117 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3118 		bsize += FBUFSIZE;
3119 	}
3120 
3121 	if (cmd->ResponseLen < bsize) {
3122 		CACHE_UNLOCK(ha);
3123 		if (cmd->ResponseLen != 0) {
3124 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3125 			    bsize, cmd->ResponseLen);
3126 		}
3127 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3128 		cmd->DetailStatus = bsize;
3129 		return;
3130 	}
3131 
3132 	boff = 0;
3133 	fptr = ha->fcache;
3134 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3135 		/* Get the next image */
3136 		if (ddi_copyout(fptr->buf,
3137 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3138 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3139 		    mode) != 0) {
3140 			CACHE_UNLOCK(ha);
3141 			EL(ha, "failed, ddicopy at %xh, exiting\n", boff);
3142 			cmd->Status = EXT_STATUS_COPY_ERR;
3143 			cmd->DetailStatus = 0;
3144 			return;
3145 		}
3146 		boff += FBUFSIZE;
3147 		fptr = fptr->next;
3148 	}
3149 
3150 	CACHE_UNLOCK(ha);
3151 	cmd->Status = EXT_STATUS_OK;
3152 	cmd->DetailStatus = bsize;
3153 
3154 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3155 }
3156 
3157 
3158 /*
3159  * ql_read_flash
3160  *	Get flash contents.
3161  *
3162  * Input:
3163  *	ha:	adapter state pointer.
3164  *	cmd:	EXT_IOCTL cmd struct pointer.
3165  *	mode:	flags.
3166  *
3167  * Returns:
3168  *	None, request status indicated in cmd->Status.
3169  *
3170  * Context:
3171  *	Kernel context.
3172  */
3173 static void
3174 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3175 {
3176 	ql_xioctl_t	*xp = ha->xioctl;
3177 
3178 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3179 
3180 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3181 		EL(ha, "ql_stall_driver failed\n");
3182 		cmd->Status = EXT_STATUS_BUSY;
3183 		cmd->DetailStatus = xp->fdesc.flash_size;
3184 		cmd->ResponseLen = 0;
3185 		return;
3186 	}
3187 
3188 	if (ql_setup_flash(ha) != QL_SUCCESS) {
3189 		cmd->Status = EXT_STATUS_ERR;
3190 		cmd->DetailStatus = xp->fdesc.flash_size;
3191 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3192 		    cmd->ResponseLen, xp->fdesc.flash_size);
3193 		cmd->ResponseLen = 0;
3194 	} else {
3195 		/* adjust read size to flash size */
3196 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3197 			EL(ha, "adjusting req=%xh, max=%xh\n",
3198 			    cmd->ResponseLen, xp->fdesc.flash_size);
3199