1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static int ql_24xx_flash_desc(ql_adapter_state_t *);
99 static int ql_setup_flash(ql_adapter_state_t *);
100 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
101 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
103     uint32_t, int);
104 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
105     uint8_t);
106 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
107 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
109 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
115 static uint32_t ql_setup_led(ql_adapter_state_t *);
116 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
117 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
121 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
122 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
124 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
127 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
128 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
138 static void ql_restart_hba(ql_adapter_state_t *);
139 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
144 static void ql_update_flash_caches(ql_adapter_state_t *);
145 
146 /* ******************************************************************** */
147 /*			External IOCTL support.				*/
148 /* ******************************************************************** */
149 
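/*
 * Illustrative sketch only, not part of the driver: a minimal user-land
 * caller of the external ioctl interface implemented below.  The
 * EXT_IOCTL field names are the ones referenced in this file; the device
 * path, the headers that supply the EXT_* definitions, and the field
 * types are assumptions made for the example.
 *
 *	EXT_IOCTL	cmd;
 *	EXT_HBA_NODE	node;
 *	int		fd = open("/dev/qlc0", O_RDWR);	(assumed path)
 *
 *	bzero(&cmd, sizeof (cmd));
 *	bcopy("QLOGIC", &cmd.Signature, 6);	(see ql_validate_signature())
 *	cmd.Version = EXT_VERSION;
 *	cmd.HbaSelect = 0;			(0 = physical port)
 *	cmd.SubCode = EXT_SC_QUERY_HBA_NODE;
 *	cmd.ResponseAdr = (uint64_t)(uintptr_t)&node;
 *	cmd.ResponseLen = sizeof (node);
 *
 *	if (ioctl(fd, EXT_CC_QUERY, &cmd) == 0 && cmd.Status == EXT_STATUS_OK)
 *		node now holds the WWNN, model, and firmware version.
 */
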
150 /*
151  * ql_alloc_xioctl_resource
152  *	Allocates resources needed by module code.
153  *
154  * Input:
155  *	ha:		adapter state pointer.
156  *
157  * Returns:
158  *	SYS_ERRNO
159  *
160  * Context:
161  *	Kernel context.
162  */
163 int
164 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
165 {
166 	ql_xioctl_t	*xp;
167 
168 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
169 
170 	if (ha->xioctl != NULL) {
		QL_PRINT_9(CE_CONT, "(%d): already allocated, done\n",
172 		    ha->instance);
173 		return (0);
174 	}
175 
176 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
177 	if (xp == NULL) {
178 		EL(ha, "failed, kmem_zalloc\n");
179 		return (ENOMEM);
180 	}
181 	ha->xioctl = xp;
182 
183 	/* Allocate AEN tracking buffer */
184 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
185 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
186 	if (xp->aen_tracking_queue == NULL) {
187 		EL(ha, "failed, kmem_zalloc-2\n");
188 		ql_free_xioctl_resource(ha);
189 		return (ENOMEM);
190 	}
191 
192 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
193 
194 	return (0);
195 }
196 
197 /*
198  * ql_free_xioctl_resource
199  *	Frees resources used by module code.
200  *
201  * Input:
202  *	ha:		adapter state pointer.
203  *
204  * Context:
205  *	Kernel context.
206  */
207 void
208 ql_free_xioctl_resource(ql_adapter_state_t *ha)
209 {
210 	ql_xioctl_t	*xp = ha->xioctl;
211 
212 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
213 
214 	if (xp == NULL) {
215 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
216 		return;
217 	}
218 
219 	if (xp->aen_tracking_queue != NULL) {
220 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
221 		    sizeof (EXT_ASYNC_EVENT));
222 		xp->aen_tracking_queue = NULL;
223 	}
224 
225 	kmem_free(xp, sizeof (ql_xioctl_t));
226 	ha->xioctl = NULL;
227 
228 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
229 }
230 
231 /*
232  * ql_xioctl
233  *	External IOCTL processing.
234  *
235  * Input:
236  *	ha:	adapter state pointer.
237  *	cmd:	function to perform
238  *	arg:	data type varies with request
239  *	mode:	flags
240  *	cred_p:	credentials pointer
241  *	rval_p:	pointer to result value
242  *
243  * Returns:
244  *	0:		success
245  *	ENXIO:		No such device or address
246  *	ENOPROTOOPT:	Protocol not available
247  *
248  * Context:
249  *	Kernel context.
250  */
251 /* ARGSUSED */
252 int
253 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
254     cred_t *cred_p, int *rval_p)
255 {
256 	int	rval;
257 
258 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
259 
260 	if (ha->xioctl == NULL) {
261 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
262 		return (ENXIO);
263 	}
264 
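	/*
	 * All supported external ioctl commands are copied in and
	 * dispatched by ql_sdm_ioctl(); unrecognized commands are
	 * rejected with ENOPROTOOPT below.
	 */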
265 	switch (cmd) {
266 	case EXT_CC_QUERY:
267 	case EXT_CC_SEND_FCCT_PASSTHRU:
268 	case EXT_CC_REG_AEN:
269 	case EXT_CC_GET_AEN:
270 	case EXT_CC_SEND_SCSI_PASSTHRU:
271 	case EXT_CC_WWPN_TO_SCSIADDR:
272 	case EXT_CC_SEND_ELS_RNID:
273 	case EXT_CC_SET_DATA:
274 	case EXT_CC_GET_DATA:
275 	case EXT_CC_HOST_IDX:
276 	case EXT_CC_READ_NVRAM:
277 	case EXT_CC_UPDATE_NVRAM:
278 	case EXT_CC_READ_OPTION_ROM:
279 	case EXT_CC_READ_OPTION_ROM_EX:
280 	case EXT_CC_UPDATE_OPTION_ROM:
281 	case EXT_CC_UPDATE_OPTION_ROM_EX:
282 	case EXT_CC_GET_VPD:
283 	case EXT_CC_SET_VPD:
284 	case EXT_CC_LOOPBACK:
285 	case EXT_CC_GET_FCACHE:
286 	case EXT_CC_GET_FCACHE_EX:
287 	case EXT_CC_HOST_DRVNAME:
288 	case EXT_CC_GET_SFP_DATA:
289 	case EXT_CC_PORT_PARAM:
290 	case EXT_CC_GET_PCI_DATA:
291 	case EXT_CC_GET_FWEXTTRACE:
292 	case EXT_CC_GET_FWFCETRACE:
293 	case EXT_CC_GET_VP_CNT_ID:
294 	case EXT_CC_VPORT_CMD:
295 	case EXT_CC_ACCESS_FLASH:
296 	case EXT_CC_RESET_FW:
297 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
298 		break;
299 	default:
300 		/* function not supported. */
301 		EL(ha, "function=%d not supported\n", cmd);
302 		rval = ENOPROTOOPT;
303 	}
304 
305 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
306 
307 	return (rval);
308 }
309 
310 /*
311  * ql_sdm_ioctl
312  *	Provides ioctl functions for SAN/Device Management functions
313  *	AKA External Ioctl functions.
314  *
315  * Input:
316  *	ha:		adapter state pointer.
317  *	ioctl_code:	ioctl function to perform
318  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
319  *	mode:		flags
320  *
321  * Returns:
322  *	0:	success
323  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
324  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
325  *		copyout of EXT_IOCTL status info failed.
326  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
327  *	EBUSY:	Device busy
328  *
329  * Context:
330  *	Kernel context.
331  */
332 static int
333 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
334 {
335 	EXT_IOCTL		*cmd;
336 	int			rval;
337 	ql_adapter_state_t	*vha;
338 
339 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
340 
341 	/* Copy argument structure (EXT_IOCTL) from application land. */
342 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
343 	    ql_validate_signature)) != 0) {
344 		/*
		 * A non-zero value here means there was a problem getting
		 * the requested information from application land; just
		 * return the error code and hope for the best.
348 		 */
349 		EL(ha, "failed, sdm_setup\n");
350 		return (rval);
351 	}
352 
353 	/*
354 	 * Map the physical ha ptr (which the ioctl is called with)
355 	 * to the virtual ha that the caller is addressing.
356 	 */
357 	if (ha->flags & VP_ENABLED) {
358 		/*
359 		 * Special case: HbaSelect == 0 is physical ha
360 		 */
361 		if (cmd->HbaSelect != 0) {
362 			vha = ha->vp_next;
363 			while (vha != NULL) {
364 				if (vha->vp_index == cmd->HbaSelect) {
365 					ha = vha;
366 					break;
367 				}
368 				vha = vha->vp_next;
369 			}
370 
371 			/*
372 			 * If we can't find the specified vp index then
373 			 * we probably have an error (vp indexes shifting
374 			 * under our feet?).
375 			 */
376 			if (vha == NULL) {
377 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
378 				    cmd->HbaSelect);
379 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
380 				cmd->ResponseLen = 0;
381 				return (EFAULT);
382 			}
383 		}
384 	}
385 
386 	/*
387 	 * If driver is suspended, stalled, or powered down rtn BUSY
388 	 */
389 	if (ha->flags & ADAPTER_SUSPENDED ||
390 	    ha->task_daemon_flags & DRIVER_STALL ||
391 	    ha->power_level != PM_LEVEL_D0) {
392 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
393 		    "driver suspended" :
394 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
395 		    "FCA powered down"));
396 		cmd->Status = EXT_STATUS_BUSY;
397 		cmd->ResponseLen = 0;
398 		rval = EBUSY;
399 
400 		/* Return results to caller */
401 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
402 			EL(ha, "failed, sdm_return\n");
403 			rval = EFAULT;
404 		}
405 		return (rval);
406 	}
407 
408 	switch (ioctl_code) {
409 	case EXT_CC_QUERY_OS:
410 		ql_query(ha, cmd, mode);
411 		break;
412 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
413 		ql_fcct(ha, cmd, mode);
414 		break;
415 	case EXT_CC_REG_AEN_OS:
416 		ql_aen_reg(ha, cmd, mode);
417 		break;
418 	case EXT_CC_GET_AEN_OS:
419 		ql_aen_get(ha, cmd, mode);
420 		break;
421 	case EXT_CC_GET_DATA_OS:
422 		ql_get_host_data(ha, cmd, mode);
423 		break;
424 	case EXT_CC_SET_DATA_OS:
425 		ql_set_host_data(ha, cmd, mode);
426 		break;
427 	case EXT_CC_SEND_ELS_RNID_OS:
428 		ql_send_els_rnid(ha, cmd, mode);
429 		break;
430 	case EXT_CC_SCSI_PASSTHRU_OS:
431 		ql_scsi_passthru(ha, cmd, mode);
432 		break;
433 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
434 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
435 		break;
436 	case EXT_CC_HOST_IDX_OS:
437 		ql_host_idx(ha, cmd, mode);
438 		break;
439 	case EXT_CC_HOST_DRVNAME_OS:
440 		ql_host_drvname(ha, cmd, mode);
441 		break;
442 	case EXT_CC_READ_NVRAM_OS:
443 		ql_read_nvram(ha, cmd, mode);
444 		break;
445 	case EXT_CC_UPDATE_NVRAM_OS:
446 		ql_write_nvram(ha, cmd, mode);
447 		break;
448 	case EXT_CC_READ_OPTION_ROM_OS:
449 	case EXT_CC_READ_OPTION_ROM_EX_OS:
450 		ql_read_flash(ha, cmd, mode);
451 		break;
452 	case EXT_CC_UPDATE_OPTION_ROM_OS:
453 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
454 		ql_write_flash(ha, cmd, mode);
455 		break;
456 	case EXT_CC_LOOPBACK_OS:
457 		ql_diagnostic_loopback(ha, cmd, mode);
458 		break;
459 	case EXT_CC_GET_VPD_OS:
460 		ql_read_vpd(ha, cmd, mode);
461 		break;
462 	case EXT_CC_SET_VPD_OS:
463 		ql_write_vpd(ha, cmd, mode);
464 		break;
465 	case EXT_CC_GET_FCACHE_OS:
466 		ql_get_fcache(ha, cmd, mode);
467 		break;
468 	case EXT_CC_GET_FCACHE_EX_OS:
469 		ql_get_fcache_ex(ha, cmd, mode);
470 		break;
471 	case EXT_CC_GET_SFP_DATA_OS:
472 		ql_get_sfp(ha, cmd, mode);
473 		break;
474 	case EXT_CC_PORT_PARAM_OS:
475 		ql_port_param(ha, cmd, mode);
476 		break;
477 	case EXT_CC_GET_PCI_DATA_OS:
478 		ql_get_pci_data(ha, cmd, mode);
479 		break;
480 	case EXT_CC_GET_FWEXTTRACE_OS:
481 		ql_get_fwexttrace(ha, cmd, mode);
482 		break;
483 	case EXT_CC_GET_FWFCETRACE_OS:
484 		ql_get_fwfcetrace(ha, cmd, mode);
485 		break;
486 	case EXT_CC_MENLO_RESET:
487 		ql_menlo_reset(ha, cmd, mode);
488 		break;
489 	case EXT_CC_MENLO_GET_FW_VERSION:
490 		ql_menlo_get_fw_version(ha, cmd, mode);
491 		break;
492 	case EXT_CC_MENLO_UPDATE_FW:
493 		ql_menlo_update_fw(ha, cmd, mode);
494 		break;
495 	case EXT_CC_MENLO_MANAGE_INFO:
496 		ql_menlo_manage_info(ha, cmd, mode);
497 		break;
498 	case EXT_CC_GET_VP_CNT_ID_OS:
499 		ql_get_vp_cnt_id(ha, cmd, mode);
500 		break;
501 	case EXT_CC_VPORT_CMD_OS:
502 		ql_vp_ioctl(ha, cmd, mode);
503 		break;
504 	case EXT_CC_ACCESS_FLASH_OS:
505 		ql_access_flash(ha, cmd, mode);
506 		break;
507 	case EXT_CC_RESET_FW_OS:
508 		ql_reset_cmd(ha, cmd);
509 		break;
510 	default:
511 		/* function not supported. */
512 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
513 
514 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
515 		cmd->ResponseLen = 0;
516 		break;
517 	}
518 
519 	/* Return results to caller */
520 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
521 		EL(ha, "failed, sdm_return\n");
522 		return (EFAULT);
523 	}
524 
525 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
526 
527 	return (0);
528 }
529 
530 /*
531  * ql_sdm_setup
532  *	Make a local copy of the EXT_IOCTL struct and validate it.
533  *
534  * Input:
535  *	ha:		adapter state pointer.
536  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
537  *	arg:		Address of application EXT_IOCTL cmd data
538  *	mode:		flags
539  *	val_sig:	Pointer to a function to validate the ioctl signature.
540  *
541  * Returns:
542  *	0:		success
543  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
544  *	EINVAL:		Invalid version, signature.
545  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
546  *
547  * Context:
548  *	Kernel context.
549  */
550 static int
551 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
552     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
553 {
554 	int		rval;
555 	EXT_IOCTL	*cmd;
556 
557 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
558 
559 	/* Allocate local memory for EXT_IOCTL. */
560 	*cmd_struct = NULL;
561 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
562 	if (cmd == NULL) {
563 		EL(ha, "failed, kmem_zalloc\n");
564 		return (ENOMEM);
565 	}
566 	/* Get argument structure. */
567 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
568 	if (rval != 0) {
569 		EL(ha, "failed, ddi_copyin\n");
570 		rval = EFAULT;
571 	} else {
572 		/*
		 * Check the signature and the version.
		 * If either is invalid then neither is the structure,
		 * so don't attempt to return any error status because
		 * we can't trust what the caller's arg points to.
577 		 * Just return the errno.
578 		 */
579 		if (val_sig(cmd) == 0) {
580 			EL(ha, "failed, signature\n");
581 			rval = EINVAL;
582 		} else if (cmd->Version > EXT_VERSION) {
583 			EL(ha, "failed, version\n");
584 			rval = EINVAL;
585 		}
586 	}
587 
588 	if (rval == 0) {
589 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
590 		*cmd_struct = cmd;
591 		cmd->Status = EXT_STATUS_OK;
592 		cmd->DetailStatus = 0;
593 	} else {
594 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
595 	}
596 
597 	return (rval);
598 }
599 
600 /*
601  * ql_validate_signature
602  *	Validate the signature string for an external ioctl call.
603  *
604  * Input:
 *	cmd_struct:	Pointer to EXT_IOCTL struct to validate.
606  *
607  * Returns:
608  *	B_TRUE:		Signature is valid.
609  *	B_FALSE:	Signature is NOT valid.
610  *
611  * Context:
612  *	Kernel context.
613  */
614 static boolean_t
615 ql_validate_signature(EXT_IOCTL *cmd_struct)
616 {
617 	/*
618 	 * Check signature.
619 	 *
	 * If the signature is not valid then neither is the rest of
	 * the structure (i.e., it can't be trusted), so don't attempt
	 * to return any error status other than the errno.
623 	 */
624 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
625 		QL_PRINT_2(CE_CONT, "failed,\n");
626 		return (B_FALSE);
627 	}
628 
629 	return (B_TRUE);
630 }
631 
632 /*
633  * ql_sdm_return
634  *	Copies return data/status to application land for
635  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
636  *
637  * Input:
638  *	ha:		adapter state pointer.
639  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
641  *	arg:		EXT_IOCTL cmd data in application land.
642  *	mode:		flags
643  *
644  * Returns:
645  *	0:	success
646  *	EFAULT:	Copy out error.
647  *
648  * Context:
649  *	Kernel context.
650  */
651 /* ARGSUSED */
652 static int
653 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
654 {
655 	int	rval = 0;
656 
657 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
658 
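	/*
	 * Only the status fields (ResponseLen, Status, DetailStatus) are
	 * copied back here; any response payload was already written to
	 * cmd->ResponseAdr by the individual subfunction handlers.
	 */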
659 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
660 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
661 	    mode);
662 
663 	rval |= ddi_copyout((void *)&cmd->Status,
664 	    (void *)&(((EXT_IOCTL*)arg)->Status),
665 	    sizeof (cmd->Status), mode);
666 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
667 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
668 	    sizeof (cmd->DetailStatus), mode);
669 
670 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
671 
672 	if (rval != 0) {
673 		/* Some copyout operation failed */
674 		EL(ha, "failed, ddi_copyout\n");
675 		return (EFAULT);
676 	}
677 
678 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
679 
680 	return (0);
681 }
682 
683 /*
684  * ql_query
685  *	Performs all EXT_CC_QUERY functions.
686  *
687  * Input:
688  *	ha:	adapter state pointer.
689  *	cmd:	Local EXT_IOCTL cmd struct pointer.
690  *	mode:	flags.
691  *
692  * Returns:
693  *	None, request status indicated in cmd->Status.
694  *
695  * Context:
696  *	Kernel context.
697  */
698 static void
699 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
700 {
701 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
702 	    cmd->SubCode);
703 
704 	/* case off on command subcode */
705 	switch (cmd->SubCode) {
706 	case EXT_SC_QUERY_HBA_NODE:
707 		ql_qry_hba_node(ha, cmd, mode);
708 		break;
709 	case EXT_SC_QUERY_HBA_PORT:
710 		ql_qry_hba_port(ha, cmd, mode);
711 		break;
712 	case EXT_SC_QUERY_DISC_PORT:
713 		ql_qry_disc_port(ha, cmd, mode);
714 		break;
715 	case EXT_SC_QUERY_DISC_TGT:
716 		ql_qry_disc_tgt(ha, cmd, mode);
717 		break;
718 	case EXT_SC_QUERY_DRIVER:
719 		ql_qry_driver(ha, cmd, mode);
720 		break;
721 	case EXT_SC_QUERY_FW:
722 		ql_qry_fw(ha, cmd, mode);
723 		break;
724 	case EXT_SC_QUERY_CHIP:
725 		ql_qry_chip(ha, cmd, mode);
726 		break;
727 	case EXT_SC_QUERY_DISC_LUN:
728 	default:
729 		/* function not supported. */
730 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
731 		EL(ha, "failed, Unsupported Subcode=%xh\n",
732 		    cmd->SubCode);
733 		break;
734 	}
735 
736 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
737 }
738 
739 /*
740  * ql_qry_hba_node
741  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
742  *
743  * Input:
744  *	ha:	adapter state pointer.
745  *	cmd:	EXT_IOCTL cmd struct pointer.
746  *	mode:	flags.
747  *
748  * Returns:
749  *	None, request status indicated in cmd->Status.
750  *
751  * Context:
752  *	Kernel context.
753  */
754 static void
755 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
756 {
757 	EXT_HBA_NODE	tmp_node = {0};
758 	uint_t		len;
759 	caddr_t		bufp;
760 	ql_mbx_data_t	mr;
761 
762 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
763 
764 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
765 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
766 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
767 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
768 		    "Len=%xh\n", cmd->ResponseLen);
769 		cmd->ResponseLen = 0;
770 		return;
771 	}
772 
773 	/* fill in the values */
774 
775 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
776 	    EXT_DEF_WWN_NAME_SIZE);
777 
778 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
779 
780 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
781 
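	/* Use the low-order three bytes of the WWNN as the serial number. */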
782 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
783 
784 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
785 
786 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
787 		size_t		verlen;
788 		uint16_t	w;
789 		char		*tmpptr;
790 
791 		verlen = strlen((char *)(tmp_node.DriverVersion));
792 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
793 			EL(ha, "failed, No room for fpga version string\n");
794 		} else {
795 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
796 			    (uint16_t *)
797 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
798 
799 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
800 			if (tmpptr == NULL) {
801 				EL(ha, "Unable to insert fpga version str\n");
802 			} else {
803 				(void) sprintf(tmpptr, "%d.%d",
804 				    ((w & 0xf0) >> 4), (w & 0x0f));
805 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
806 			}
807 		}
808 	}
809 	(void) ql_get_fw_version(ha, &mr);
810 
811 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
812 	    mr.mb[1], mr.mb[2], mr.mb[3]);
813 
814 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
815 		switch (mr.mb[6]) {
816 		case FWATTRIB_EF:
817 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
818 			break;
819 		case FWATTRIB_TP:
820 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
821 			break;
822 		case FWATTRIB_IP:
823 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
824 			break;
825 		case FWATTRIB_IPX:
826 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
827 			break;
828 		case FWATTRIB_FL:
829 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
830 			break;
831 		case FWATTRIB_FPX:
832 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
833 			break;
834 		default:
835 			break;
836 		}
837 	}
838 
839 	/* FCode version. */
840 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
841 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
842 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
843 	    (int *)&len) == DDI_PROP_SUCCESS) {
844 		if (len < EXT_DEF_MAX_STR_SIZE) {
845 			bcopy(bufp, tmp_node.OptRomVersion, len);
846 		} else {
847 			bcopy(bufp, tmp_node.OptRomVersion,
848 			    EXT_DEF_MAX_STR_SIZE - 1);
849 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
850 			    '\0';
851 		}
852 		kmem_free(bufp, len);
853 	} else {
854 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
855 	}
856 	tmp_node.PortCount = 1;
857 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
858 
859 	if (ddi_copyout((void *)&tmp_node,
860 	    (void *)(uintptr_t)(cmd->ResponseAdr),
861 	    sizeof (EXT_HBA_NODE), mode) != 0) {
862 		cmd->Status = EXT_STATUS_COPY_ERR;
863 		cmd->ResponseLen = 0;
864 		EL(ha, "failed, ddi_copyout\n");
865 	} else {
866 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
867 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
868 	}
869 }
870 
871 /*
872  * ql_qry_hba_port
873  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
874  *
875  * Input:
876  *	ha:	adapter state pointer.
877  *	cmd:	EXT_IOCTL cmd struct pointer.
878  *	mode:	flags.
879  *
880  * Returns:
881  *	None, request status indicated in cmd->Status.
882  *
883  * Context:
884  *	Kernel context.
885  */
886 static void
887 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
888 {
889 	ql_link_t	*link;
890 	ql_tgt_t	*tq;
891 	ql_mbx_data_t	mr;
892 	EXT_HBA_PORT	tmp_port = {0};
893 	int		rval;
894 	uint16_t	port_cnt, tgt_cnt, index;
895 
896 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
897 
898 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
899 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
900 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
		EL(ha, "failed, ResponseLen < EXT_HBA_PORT, Len=%xh\n",
902 		    cmd->ResponseLen);
903 		cmd->ResponseLen = 0;
904 		return;
905 	}
906 
907 	/* fill in the values */
908 
909 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
910 	    EXT_DEF_WWN_NAME_SIZE);
911 	tmp_port.Id[0] = 0;
912 	tmp_port.Id[1] = ha->d_id.b.domain;
913 	tmp_port.Id[2] = ha->d_id.b.area;
914 	tmp_port.Id[3] = ha->d_id.b.al_pa;
915 
	/* For now this is an initiator-only driver. */
917 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
918 
919 	if (ha->task_daemon_flags & LOOP_DOWN) {
920 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
921 	} else if (DRIVER_SUSPENDED(ha)) {
922 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
923 	} else {
924 		tmp_port.State = EXT_DEF_HBA_OK;
925 	}
926 
927 	if (ha->flags & POINT_TO_POINT) {
928 		tmp_port.Mode = EXT_DEF_P2P_MODE;
929 	} else {
930 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
931 	}
932 	/*
933 	 * fill in the portspeed values.
934 	 *
935 	 * default to not yet negotiated state
936 	 */
937 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
938 
939 	if (tmp_port.State == EXT_DEF_HBA_OK) {
940 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
941 			mr.mb[1] = 0;
942 			mr.mb[2] = 0;
943 			rval = ql_data_rate(ha, &mr);
944 			if (rval != QL_SUCCESS) {
945 				EL(ha, "failed, data_rate=%xh\n", rval);
946 			} else {
947 				switch (mr.mb[1]) {
948 				case IIDMA_RATE_1GB:
949 					tmp_port.PortSpeed =
950 					    EXT_DEF_PORTSPEED_1GBIT;
951 					break;
952 				case IIDMA_RATE_2GB:
953 					tmp_port.PortSpeed =
954 					    EXT_DEF_PORTSPEED_2GBIT;
955 					break;
956 				case IIDMA_RATE_4GB:
957 					tmp_port.PortSpeed =
958 					    EXT_DEF_PORTSPEED_4GBIT;
959 					break;
960 				case IIDMA_RATE_8GB:
961 					tmp_port.PortSpeed =
962 					    EXT_DEF_PORTSPEED_8GBIT;
963 					break;
964 				case IIDMA_RATE_10GB:
965 					tmp_port.PortSpeed =
966 					    EXT_DEF_PORTSPEED_10GBIT;
967 					break;
968 				default:
969 					tmp_port.PortSpeed =
970 					    EXT_DEF_PORTSPEED_UNKNOWN;
971 					EL(ha, "failed, data rate=%xh\n",
972 					    mr.mb[1]);
973 					break;
974 				}
975 			}
976 		} else {
977 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
978 		}
979 	}
980 
981 	/* Report all supported port speeds */
982 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
983 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
984 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
985 		    EXT_DEF_PORTSPEED_1GBIT);
986 		/*
987 		 * Correct supported speeds based on type of
988 		 * sfp that is present
989 		 */
990 		switch (ha->sfp_stat) {
991 		case 1:
992 			/* no sfp detected */
993 			break;
994 		case 2:
995 		case 4:
996 			/* 4GB sfp */
997 			tmp_port.PortSupportedSpeed &=
998 			    ~EXT_DEF_PORTSPEED_8GBIT;
999 			break;
1000 		case 3:
1001 		case 5:
1002 			/* 8GB sfp */
1003 			tmp_port.PortSupportedSpeed &=
1004 			    ~EXT_DEF_PORTSPEED_1GBIT;
1005 			break;
1006 		default:
1007 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1008 			break;
		}
1011 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
1012 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1013 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1014 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1015 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1016 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1017 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1018 		    EXT_DEF_PORTSPEED_1GBIT);
1019 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1020 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1021 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1022 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1023 	} else {
1024 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1025 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1026 	}
1027 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1028 	port_cnt = 0;
1029 	tgt_cnt = 0;
1030 
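	/* Count discovered ports and, separately, non-initiator targets. */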
1031 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1032 		for (link = ha->dev[index].first; link != NULL;
1033 		    link = link->next) {
1034 			tq = link->base_address;
1035 
1036 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1037 				continue;
1038 			}
1039 
1040 			port_cnt++;
1041 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1042 				tgt_cnt++;
1043 			}
1044 		}
1045 	}
1046 
1047 	tmp_port.DiscPortCount = port_cnt;
1048 	tmp_port.DiscTargetCount = tgt_cnt;
1049 
1050 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1051 
1052 	rval = ddi_copyout((void *)&tmp_port,
1053 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1054 	    sizeof (EXT_HBA_PORT), mode);
1055 	if (rval != 0) {
1056 		cmd->Status = EXT_STATUS_COPY_ERR;
1057 		cmd->ResponseLen = 0;
1058 		EL(ha, "failed, ddi_copyout\n");
1059 	} else {
1060 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1061 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1062 		    ha->instance, port_cnt, tgt_cnt);
1063 	}
1064 }
1065 
1066 /*
1067  * ql_qry_disc_port
1068  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1069  *
1070  * Input:
1071  *	ha:	adapter state pointer.
1072  *	cmd:	EXT_IOCTL cmd struct pointer.
1073  *	mode:	flags.
1074  *
1075  *	cmd->Instance = Port instance in fcport chain.
1076  *
1077  * Returns:
1078  *	None, request status indicated in cmd->Status.
1079  *
1080  * Context:
1081  *	Kernel context.
1082  */
1083 static void
1084 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1085 {
1086 	EXT_DISC_PORT	tmp_port = {0};
1087 	ql_link_t	*link;
1088 	ql_tgt_t	*tq;
1089 	uint16_t	index;
1090 	uint16_t	inst = 0;
1091 
1092 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1093 
1094 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1095 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1096 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1097 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1098 		    cmd->ResponseLen);
1099 		cmd->ResponseLen = 0;
1100 		return;
1101 	}
1102 
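	/*
	 * Walk the device hash lists until the cmd->Instance'th valid
	 * (discovered) port is found.
	 */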
1103 	for (link = NULL, index = 0;
1104 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1105 		for (link = ha->dev[index].first; link != NULL;
1106 		    link = link->next) {
1107 			tq = link->base_address;
1108 
1109 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1110 				continue;
1111 			}
1112 			if (inst != cmd->Instance) {
1113 				inst++;
1114 				continue;
1115 			}
1116 
1117 			/* fill in the values */
1118 			bcopy(tq->node_name, tmp_port.WWNN,
1119 			    EXT_DEF_WWN_NAME_SIZE);
1120 			bcopy(tq->port_name, tmp_port.WWPN,
1121 			    EXT_DEF_WWN_NAME_SIZE);
1122 
1123 			break;
1124 		}
1125 	}
1126 
1127 	if (link == NULL) {
1128 		/* no matching device */
1129 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1130 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1131 		cmd->ResponseLen = 0;
1132 		return;
1133 	}
1134 
1135 	tmp_port.Id[0] = 0;
1136 	tmp_port.Id[1] = tq->d_id.b.domain;
1137 	tmp_port.Id[2] = tq->d_id.b.area;
1138 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1139 
1140 	tmp_port.Type = 0;
1141 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1142 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1143 		    EXT_DEF_INITIATOR_DEV);
1144 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1145 		(void) ql_inq_scan(ha, tq, 1);
1146 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1147 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1148 	}
1149 
1150 	if (tq->flags & TQF_FABRIC_DEVICE) {
1151 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1152 	} else {
1153 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1154 	}
1155 
1156 	tmp_port.Status = 0;
1157 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1158 
1159 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1160 
1161 	if (ddi_copyout((void *)&tmp_port,
1162 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1163 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1164 		cmd->Status = EXT_STATUS_COPY_ERR;
1165 		cmd->ResponseLen = 0;
1166 		EL(ha, "failed, ddi_copyout\n");
1167 	} else {
1168 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1169 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1170 	}
1171 }
1172 
1173 /*
1174  * ql_qry_disc_tgt
1175  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1176  *
1177  * Input:
1178  *	ha:		adapter state pointer.
1179  *	cmd:		EXT_IOCTL cmd struct pointer.
1180  *	mode:		flags.
1181  *
1182  *	cmd->Instance = Port instance in fcport chain.
1183  *
1184  * Returns:
1185  *	None, request status indicated in cmd->Status.
1186  *
1187  * Context:
1188  *	Kernel context.
1189  */
1190 static void
1191 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1192 {
1193 	EXT_DISC_TARGET	tmp_tgt = {0};
1194 	ql_link_t	*link;
1195 	ql_tgt_t	*tq;
1196 	uint16_t	index;
1197 	uint16_t	inst = 0;
1198 
1199 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1200 	    cmd->Instance);
1201 
1202 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1203 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1204 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1205 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1206 		    cmd->ResponseLen);
1207 		cmd->ResponseLen = 0;
1208 		return;
1209 	}
1210 
1211 	/* Scan port list for requested target and fill in the values */
1212 	for (link = NULL, index = 0;
1213 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1214 		for (link = ha->dev[index].first; link != NULL;
1215 		    link = link->next) {
1216 			tq = link->base_address;
1217 
1218 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1219 			    tq->flags & TQF_INITIATOR_DEVICE) {
1220 				continue;
1221 			}
1222 			if (inst != cmd->Instance) {
1223 				inst++;
1224 				continue;
1225 			}
1226 
1227 			/* fill in the values */
1228 			bcopy(tq->node_name, tmp_tgt.WWNN,
1229 			    EXT_DEF_WWN_NAME_SIZE);
1230 			bcopy(tq->port_name, tmp_tgt.WWPN,
1231 			    EXT_DEF_WWN_NAME_SIZE);
1232 
1233 			break;
1234 		}
1235 	}
1236 
1237 	if (link == NULL) {
1238 		/* no matching device */
1239 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1240 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1241 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1242 		cmd->ResponseLen = 0;
1243 		return;
1244 	}
1245 	tmp_tgt.Id[0] = 0;
1246 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1247 	tmp_tgt.Id[2] = tq->d_id.b.area;
1248 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1249 
1250 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1251 
1252 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1253 		(void) ql_inq_scan(ha, tq, 1);
1254 	}
1255 
1256 	tmp_tgt.Type = 0;
1257 	if (tq->flags & TQF_TAPE_DEVICE) {
1258 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1259 	}
1260 
1261 	if (tq->flags & TQF_FABRIC_DEVICE) {
1262 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1263 	} else {
1264 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1265 	}
1266 
1267 	tmp_tgt.Status = 0;
1268 
1269 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1270 
1271 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1272 
1273 	if (ddi_copyout((void *)&tmp_tgt,
1274 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1275 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1276 		cmd->Status = EXT_STATUS_COPY_ERR;
1277 		cmd->ResponseLen = 0;
1278 		EL(ha, "failed, ddi_copyout\n");
1279 	} else {
1280 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1281 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1282 	}
1283 }
1284 
1285 /*
1286  * ql_qry_fw
1287  *	Performs EXT_SC_QUERY_FW subfunction.
1288  *
1289  * Input:
1290  *	ha:	adapter state pointer.
1291  *	cmd:	EXT_IOCTL cmd struct pointer.
1292  *	mode:	flags.
1293  *
1294  * Returns:
1295  *	None, request status indicated in cmd->Status.
1296  *
1297  * Context:
1298  *	Kernel context.
1299  */
1300 static void
1301 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1302 {
1303 	ql_mbx_data_t	mr;
1304 	EXT_FW		fw_info = {0};
1305 
1306 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1307 
1308 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1309 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1310 		cmd->DetailStatus = sizeof (EXT_FW);
1311 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1312 		    cmd->ResponseLen);
1313 		cmd->ResponseLen = 0;
1314 		return;
1315 	}
1316 
1317 	(void) ql_get_fw_version(ha, &mr);
1318 
1319 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
	    mr.mb[2], mr.mb[3]);
1321 
1322 	fw_info.Attrib = mr.mb[6];
1323 
1324 	if (ddi_copyout((void *)&fw_info,
1325 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1326 	    sizeof (EXT_FW), mode) != 0) {
1327 		cmd->Status = EXT_STATUS_COPY_ERR;
1328 		cmd->ResponseLen = 0;
1329 		EL(ha, "failed, ddi_copyout\n");
1330 		return;
1331 	} else {
1332 		cmd->ResponseLen = sizeof (EXT_FW);
1333 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1334 	}
1335 }
1336 
1337 /*
1338  * ql_qry_chip
1339  *	Performs EXT_SC_QUERY_CHIP subfunction.
1340  *
1341  * Input:
1342  *	ha:	adapter state pointer.
1343  *	cmd:	EXT_IOCTL cmd struct pointer.
1344  *	mode:	flags.
1345  *
1346  * Returns:
1347  *	None, request status indicated in cmd->Status.
1348  *
1349  * Context:
1350  *	Kernel context.
1351  */
1352 static void
1353 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1354 {
1355 	EXT_CHIP	chip = {0};
1356 
1357 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1358 
1359 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1360 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1361 		cmd->DetailStatus = sizeof (EXT_CHIP);
1362 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1363 		    cmd->ResponseLen);
1364 		cmd->ResponseLen = 0;
1365 		return;
1366 	}
1367 
1368 	chip.VendorId = ha->ven_id;
1369 	chip.DeviceId = ha->device_id;
1370 	chip.SubVendorId = ha->subven_id;
1371 	chip.SubSystemId = ha->subsys_id;
1372 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1373 	chip.IoAddrLen = 0x100;
1374 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1375 	chip.MemAddrLen = 0x100;
1376 	chip.ChipRevID = ha->rev_id;
1377 	if (ha->flags & FUNCTION_1) {
1378 		chip.FuncNo = 1;
1379 	}
1380 
1381 	if (ddi_copyout((void *)&chip,
1382 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1383 	    sizeof (EXT_CHIP), mode) != 0) {
1384 		cmd->Status = EXT_STATUS_COPY_ERR;
1385 		cmd->ResponseLen = 0;
1386 		EL(ha, "failed, ddi_copyout\n");
1387 	} else {
1388 		cmd->ResponseLen = sizeof (EXT_CHIP);
1389 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1390 	}
1391 }
1392 
1393 /*
1394  * ql_qry_driver
1395  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1396  *
1397  * Input:
1398  *	ha:	adapter state pointer.
1399  *	cmd:	EXT_IOCTL cmd struct pointer.
1400  *	mode:	flags.
1401  *
1402  * Returns:
1403  *	None, request status indicated in cmd->Status.
1404  *
1405  * Context:
1406  *	Kernel context.
1407  */
1408 static void
1409 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1410 {
1411 	EXT_DRIVER	qd = {0};
1412 
1413 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1414 
1415 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1416 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1417 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1418 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1419 		    cmd->ResponseLen);
1420 		cmd->ResponseLen = 0;
1421 		return;
1422 	}
1423 
1424 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1425 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1426 	qd.TargetsPerBus = (uint16_t)
1427 	    (CFG_IST(ha, (CFG_CTRL_242581 | CFG_EXT_FW_INTERFACE)) ?
1428 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1429 	qd.LunsPerTarget = 2030;
1430 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1431 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1432 
1433 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1434 	    sizeof (EXT_DRIVER), mode) != 0) {
1435 		cmd->Status = EXT_STATUS_COPY_ERR;
1436 		cmd->ResponseLen = 0;
1437 		EL(ha, "failed, ddi_copyout\n");
1438 	} else {
1439 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1440 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1441 	}
1442 }
1443 
1444 /*
1445  * ql_fcct
1446  *	IOCTL management server FC-CT passthrough.
1447  *
1448  * Input:
1449  *	ha:	adapter state pointer.
1450  *	cmd:	User space CT arguments pointer.
1451  *	mode:	flags.
1452  *
1453  * Returns:
1454  *	None, request status indicated in cmd->Status.
1455  *
1456  * Context:
1457  *	Kernel context.
1458  */
1459 static void
1460 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1461 {
1462 	ql_mbx_iocb_t		*pkt;
1463 	ql_mbx_data_t		mr;
1464 	dma_mem_t		*dma_mem;
1465 	caddr_t			pld;
1466 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1467 	int			rval;
1468 	ql_ct_iu_preamble_t	*ct;
1469 	ql_xioctl_t		*xp = ha->xioctl;
1470 	ql_tgt_t		tq;
1471 	uint16_t		comp_status, loop_id;
1472 
1473 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1474 
1475 	/* Get CT argument structure. */
1476 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1477 		EL(ha, "failed, No switch\n");
1478 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1479 		cmd->ResponseLen = 0;
1480 		return;
1481 	}
1482 
1483 	if (DRIVER_SUSPENDED(ha)) {
1484 		EL(ha, "failed, LOOP_NOT_READY\n");
1485 		cmd->Status = EXT_STATUS_BUSY;
1486 		cmd->ResponseLen = 0;
1487 		return;
1488 	}
1489 
1490 	/* Login management server device. */
1491 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
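		/* FC management server well-known address 0xFFFFFA. */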
1492 		tq.d_id.b.al_pa = 0xfa;
1493 		tq.d_id.b.area = 0xff;
1494 		tq.d_id.b.domain = 0xff;
1495 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1496 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1497 		    MANAGEMENT_SERVER_LOOP_ID);
1498 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1499 		if (rval != QL_SUCCESS) {
1500 			EL(ha, "failed, server login\n");
1501 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1502 			cmd->ResponseLen = 0;
1503 			return;
1504 		} else {
1505 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1506 		}
1507 	}
1508 
1509 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1510 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1511 
1512 	/* Allocate a DMA Memory Descriptor */
1513 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1514 	if (dma_mem == NULL) {
1515 		EL(ha, "failed, kmem_zalloc\n");
1516 		cmd->Status = EXT_STATUS_NO_MEMORY;
1517 		cmd->ResponseLen = 0;
1518 		return;
1519 	}
1520 	/* Determine maximum buffer size. */
1521 	if (cmd->RequestLen < cmd->ResponseLen) {
1522 		pld_byte_cnt = cmd->ResponseLen;
1523 	} else {
1524 		pld_byte_cnt = cmd->RequestLen;
1525 	}
1526 
1527 	/* Allocate command block. */
1528 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1529 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1530 	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_NO_MEMORY;
1533 		cmd->ResponseLen = 0;
1534 		return;
1535 	}
1536 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1537 
1538 	/* Get command payload data. */
1539 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1540 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1541 		EL(ha, "failed, get_buffer_data\n");
		kmem_free(pkt, pkt_size);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_COPY_ERR;
1544 		cmd->ResponseLen = 0;
1545 		return;
1546 	}
1547 
1548 	/* Get DMA memory for the IOCB */
1549 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1550 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1551 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1552 		    "alloc failed", QL_NAME, ha->instance);
1553 		kmem_free(pkt, pkt_size);
1554 		kmem_free(dma_mem, sizeof (dma_mem_t));
1555 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1556 		cmd->ResponseLen = 0;
1557 		return;
1558 	}
1559 
	/* Copy outgoing payload data to the IOCB DMA buffer. */
1561 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1562 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1563 
1564 	/* Sync IOCB DMA buffer. */
1565 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1566 	    DDI_DMA_SYNC_FORDEV);
1567 
1568 	/*
1569 	 * Setup IOCB
1570 	 */
1571 	ct = (ql_ct_iu_preamble_t *)pld;
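	/*
	 * 24xx/25xx/81xx HBAs use the CT pass-through IOCB; earlier ISPs
	 * use the MS IOCB.  In both cases the command and response data
	 * segments reference the same DMA buffer, so the CT response
	 * overwrites the request payload in place.
	 */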
1572 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1573 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1574 		pkt->ms24.entry_count = 1;
1575 
1576 		/* Set loop ID */
1577 		pkt->ms24.n_port_hdl = (uint16_t)
1578 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1579 		    LE_16(SNS_24XX_HDL) :
1580 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1581 
1582 		/* Set ISP command timeout. */
1583 		pkt->ms24.timeout = LE_16(120);
1584 
1585 		/* Set cmd/response data segment counts. */
1586 		pkt->ms24.cmd_dseg_count = LE_16(1);
1587 		pkt->ms24.resp_dseg_count = LE_16(1);
1588 
1589 		/* Load ct cmd byte count. */
1590 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1591 
1592 		/* Load ct rsp byte count. */
1593 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1594 
1595 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1596 
1597 		/* Load MS command entry data segments. */
1598 		*long_ptr++ = (uint32_t)
1599 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1600 		*long_ptr++ = (uint32_t)
1601 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1602 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1603 
1604 		/* Load MS response entry data segments. */
1605 		*long_ptr++ = (uint32_t)
1606 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1607 		*long_ptr++ = (uint32_t)
1608 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1609 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1610 
1611 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1612 		    sizeof (ql_mbx_iocb_t));
1613 
1614 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1615 		if (comp_status == CS_DATA_UNDERRUN) {
1616 			if ((BE_16(ct->max_residual_size)) == 0) {
1617 				comp_status = CS_COMPLETE;
1618 			}
1619 		}
1620 
1621 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1622 		    0) {
1623 			EL(ha, "failed, I/O timeout or "
1624 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1625 			    pkt->sts24.entry_status,
1626 			    pkt->sts24.scsi_status_l, rval);
1627 			kmem_free(pkt, pkt_size);
1628 			ql_free_dma_resource(ha, dma_mem);
1629 			kmem_free(dma_mem, sizeof (dma_mem_t));
1630 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1631 			cmd->ResponseLen = 0;
1632 			return;
1633 		}
1634 	} else {
1635 		pkt->ms.entry_type = MS_TYPE;
1636 		pkt->ms.entry_count = 1;
1637 
1638 		/* Set loop ID */
1639 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1640 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1641 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1642 			pkt->ms.loop_id_l = LSB(loop_id);
1643 			pkt->ms.loop_id_h = MSB(loop_id);
1644 		} else {
1645 			pkt->ms.loop_id_h = LSB(loop_id);
1646 		}
1647 
1648 		/* Set ISP command timeout. */
1649 		pkt->ms.timeout = LE_16(120);
1650 
1651 		/* Set data segment counts. */
1652 		pkt->ms.cmd_dseg_count_l = 1;
1653 		pkt->ms.total_dseg_count = LE_16(2);
1654 
1655 		/* Response total byte count. */
1656 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1657 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1658 
1659 		/* Command total byte count. */
1660 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1661 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1662 
1663 		/* Load command/response data segments. */
1664 		pkt->ms.dseg_0_address[0] = (uint32_t)
1665 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1666 		pkt->ms.dseg_0_address[1] = (uint32_t)
1667 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1668 		pkt->ms.dseg_1_address[0] = (uint32_t)
1669 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1670 		pkt->ms.dseg_1_address[1] = (uint32_t)
1671 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1672 
1673 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1674 		    sizeof (ql_mbx_iocb_t));
1675 
1676 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1677 		if (comp_status == CS_DATA_UNDERRUN) {
1678 			if ((BE_16(ct->max_residual_size)) == 0) {
1679 				comp_status = CS_COMPLETE;
1680 			}
1681 		}
1682 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1683 			EL(ha, "failed, I/O timeout or "
1684 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1685 			kmem_free(pkt, pkt_size);
1686 			ql_free_dma_resource(ha, dma_mem);
1687 			kmem_free(dma_mem, sizeof (dma_mem_t));
1688 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1689 			cmd->ResponseLen = 0;
1690 			return;
1691 		}
1692 	}
1693 
	/* Sync incoming DMA buffer. */
1695 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1696 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
	/* Copy incoming DMA data. */
1698 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1699 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1700 	    DDI_DEV_AUTOINCR);
1701 
1702 	/* Copy response payload from DMA buffer to application. */
1703 	if (cmd->ResponseLen != 0) {
1704 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1705 		    cmd->ResponseLen);
1706 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1707 
1708 		/* Send response payload. */
1709 		if (ql_send_buffer_data(pld,
1710 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1711 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1712 			EL(ha, "failed, send_buffer_data\n");
1713 			cmd->Status = EXT_STATUS_COPY_ERR;
1714 			cmd->ResponseLen = 0;
1715 		}
1716 	}
1717 
1718 	kmem_free(pkt, pkt_size);
1719 	ql_free_dma_resource(ha, dma_mem);
1720 	kmem_free(dma_mem, sizeof (dma_mem_t));
1721 
1722 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1723 }
1724 
1725 /*
1726  * ql_aen_reg
1727  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1728  *
1729  * Input:
1730  *	ha:	adapter state pointer.
1731  *	cmd:	EXT_IOCTL cmd struct pointer.
1732  *	mode:	flags.
1733  *
1734  * Returns:
1735  *	None, request status indicated in cmd->Status.
1736  *
1737  * Context:
1738  *	Kernel context.
1739  */
1740 static void
1741 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1742 {
1743 	EXT_REG_AEN	reg_struct;
1744 	int		rval = 0;
1745 	ql_xioctl_t	*xp = ha->xioctl;
1746 
1747 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1748 
1749 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1750 	    cmd->RequestLen, mode);
1751 
1752 	if (rval == 0) {
1753 		if (reg_struct.Enable) {
1754 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1755 		} else {
1756 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1757 			/* Empty the queue. */
1758 			INTR_LOCK(ha);
1759 			xp->aen_q_head = 0;
1760 			xp->aen_q_tail = 0;
1761 			INTR_UNLOCK(ha);
1762 		}
1763 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1764 	} else {
1765 		cmd->Status = EXT_STATUS_COPY_ERR;
1766 		EL(ha, "failed, ddi_copyin\n");
1767 	}
1768 }
1769 
1770 /*
1771  * ql_aen_get
1772  *	IOCTL management server Asynchronous Event Record Transfer.
1773  *
1774  * Input:
1775  *	ha:	adapter state pointer.
1776  *	cmd:	EXT_IOCTL cmd struct pointer.
1777  *	mode:	flags.
1778  *
1779  * Returns:
1780  *	None, request status indicated in cmd->Status.
1781  *
1782  * Context:
1783  *	Kernel context.
1784  */
1785 static void
1786 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1787 {
1788 	uint32_t	out_size;
1789 	EXT_ASYNC_EVENT	*tmp_q;
1790 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1791 	uint8_t		i;
1792 	uint8_t		queue_cnt;
1793 	uint8_t		request_cnt;
1794 	ql_xioctl_t	*xp = ha->xioctl;
1795 
1796 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1797 
1798 	/* Compute the number of events that can be returned */
1799 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1800 
1801 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1802 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1803 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1804 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1805 		    "Len=%xh\n", request_cnt);
1806 		cmd->ResponseLen = 0;
1807 		return;
1808 	}
1809 
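	/*
	 * Drain the queue into a local array while holding INTR_LOCK,
	 * then drop the lock before the copyout, which may block.
	 */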
1810 	/* 1st: Make a local copy of the entire queue content. */
1811 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1812 	queue_cnt = 0;
1813 
1814 	INTR_LOCK(ha);
1815 	i = xp->aen_q_head;
1816 
1817 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1818 		if (tmp_q[i].AsyncEventCode != 0) {
1819 			bcopy(&tmp_q[i], &aen[queue_cnt],
1820 			    sizeof (EXT_ASYNC_EVENT));
1821 			queue_cnt++;
1822 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1823 		}
1824 		if (i == xp->aen_q_tail) {
1825 			/* done. */
1826 			break;
1827 		}
1828 		i++;
1829 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1830 			i = 0;
1831 		}
1832 	}
1833 
1834 	/* Empty the queue. */
1835 	xp->aen_q_head = 0;
1836 	xp->aen_q_tail = 0;
1837 
1838 	INTR_UNLOCK(ha);
1839 
	/* 2nd: Copy the snapshot of the queue out to the user's buffer. */
1842 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1843 	if (queue_cnt == 0) {
1844 		cmd->ResponseLen = 0;
1845 	} else if (ddi_copyout((void *)&aen[0],
1846 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1847 	    out_size, mode) != 0) {
1848 		cmd->Status = EXT_STATUS_COPY_ERR;
1849 		cmd->ResponseLen = 0;
1850 		EL(ha, "failed, ddi_copyout\n");
1851 	} else {
1852 		cmd->ResponseLen = out_size;
1853 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1854 	}
1855 }
1856 
1857 /*
1858  * ql_enqueue_aen
1859  *
1860  * Input:
1861  *	ha:		adapter state pointer.
1862  *	event_code:	async event code of the event to add to queue.
1863  *	payload:	event payload for the queue.
1864  *	INTR_LOCK must be already obtained.
1865  *
1866  * Context:
1867  *	Interrupt or Kernel context, no mailbox commands allowed.
1868  */
1869 void
1870 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1871 {
1872 	uint8_t			new_entry;	/* index to current entry */
1873 	uint16_t		*mbx;
1874 	EXT_ASYNC_EVENT		*aen_queue;
1875 	ql_xioctl_t		*xp = ha->xioctl;
1876 
1877 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1878 	    event_code);
1879 
1880 	if (xp == NULL) {
1881 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1882 		return;
1883 	}
1884 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
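
	/*
	 * A non-zero AsyncEventCode marks a slot as occupied.  If the tail
	 * slot is free (initial state or after the queue was drained), it
	 * is reused in place; otherwise the tail advances and, once it
	 * catches the head, the oldest entry is overwritten.
	 */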
1885 
	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
1887 		/* Need to change queue pointers to make room. */
1888 
1889 		/* Increment tail for adding new entry. */
1890 		xp->aen_q_tail++;
1891 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1892 			xp->aen_q_tail = 0;
1893 		}
1894 		if (xp->aen_q_head == xp->aen_q_tail) {
1895 			/*
1896 			 * We're overwriting the oldest entry, so need to
1897 			 * update the head pointer.
1898 			 */
1899 			xp->aen_q_head++;
1900 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1901 				xp->aen_q_head = 0;
1902 			}
1903 		}
1904 	}
1905 
1906 	new_entry = xp->aen_q_tail;
1907 	aen_queue[new_entry].AsyncEventCode = event_code;
1908 
1909 	/* Update payload */
1910 	if (payload != NULL) {
1911 		switch (event_code) {
1912 		case MBA_LIP_OCCURRED:
1913 		case MBA_LOOP_UP:
1914 		case MBA_LOOP_DOWN:
1915 		case MBA_LIP_F8:
1916 		case MBA_LIP_RESET:
1917 		case MBA_PORT_UPDATE:
1918 			break;
1919 		case MBA_RSCN_UPDATE:
1920 			mbx = (uint16_t *)payload;
1921 			/* al_pa */
1922 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1923 			    LSB(mbx[2]);
1924 			/* area */
1925 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1926 			    MSB(mbx[2]);
1927 			/* domain */
1928 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1929 			    LSB(mbx[1]);
1930 			/* save in big endian */
1931 			BIG_ENDIAN_24(&aen_queue[new_entry].
1932 			    Payload.RSCN.RSCNInfo[0]);
1933 
1934 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1935 			    MSB(mbx[1]);
1936 
1937 			break;
1938 		default:
1939 			/* Not supported */
1940 			EL(ha, "failed, event code not supported=%xh\n",
1941 			    event_code);
1942 			aen_queue[new_entry].AsyncEventCode = 0;
1943 			break;
1944 		}
1945 	}
1946 
1947 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1948 }
1949 
1950 /*
1951  * ql_scsi_passthru
1952  *	IOCTL SCSI passthrough.
1953  *
1954  * Input:
1955  *	ha:	adapter state pointer.
1956  *	cmd:	User space SCSI command pointer.
1957  *	mode:	flags.
1958  *
1959  * Returns:
1960  *	None, request status indicated in cmd->Status.
1961  *
1962  * Context:
1963  *	Kernel context.
1964  */
1965 static void
1966 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1967 {
1968 	ql_mbx_iocb_t		*pkt;
1969 	ql_mbx_data_t		mr;
1970 	dma_mem_t		*dma_mem;
1971 	caddr_t			pld;
1972 	uint32_t		pkt_size, pld_size;
1973 	uint16_t		qlnt, retries, cnt, cnt2;
1974 	uint8_t			*name;
1975 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1976 	EXT_SCSI_PASSTHRU	*usp_req;
1977 	int			rval;
1978 	union _passthru {
1979 		EXT_SCSI_PASSTHRU	sp_cmd;
1980 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1981 	} pt_req;		/* Passthru request */
1982 	uint32_t		status, sense_sz = 0;
1983 	ql_tgt_t		*tq = NULL;
1984 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1985 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1986 
1987 	/* SCSI request struct for SCSI passthrough IOs. */
1988 	struct {
1989 		uint16_t	lun;
1990 		uint16_t	sense_length;	/* Sense buffer size */
1991 		size_t		resid;		/* Residual */
1992 		uint8_t		*cdbp;		/* Requestor's CDB */
1993 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1994 		uint8_t		cdb_len;	/* Requestor's CDB length */
1995 		uint8_t		direction;
1996 	} scsi_req;
1997 
1998 	struct {
1999 		uint8_t		*rsp_info;
2000 		uint8_t		*req_sense_data;
2001 		uint32_t	residual_length;
2002 		uint32_t	rsp_info_length;
2003 		uint32_t	req_sense_length;
2004 		uint16_t	comp_status;
2005 		uint8_t		state_flags_l;
2006 		uint8_t		state_flags_h;
2007 		uint8_t		scsi_status_l;
2008 		uint8_t		scsi_status_h;
2009 	} sts;
2010 
2011 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2012 
	/* Verify the SubCode and set pld_size to the needed request size. */
2014 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2015 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2016 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2017 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2018 	} else {
2019 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2020 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2021 		cmd->ResponseLen = 0;
2022 		return;
2023 	}
2024 
2025 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2026 	if (dma_mem == NULL) {
2027 		EL(ha, "failed, kmem_zalloc\n");
2028 		cmd->Status = EXT_STATUS_NO_MEMORY;
2029 		cmd->ResponseLen = 0;
2030 		return;
2031 	}
	/* Verify the size of, and copy in, the passthru request structure. */
	if (cmd->RequestLen != pld_size) {
		/* Return error */
		EL(ha, "failed, RequestLen=%xh, expected=%xh\n",
		    cmd->RequestLen, pld_size);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
		return;
	}

	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
	    pld_size, mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
		return;
	}
2050 
2051 	/*
2052 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2053 	 * request data structure.
2054 	 */
2055 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2056 		scsi_req.lun = sp_req->TargetAddr.Lun;
2057 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2058 		scsi_req.cdbp = &sp_req->Cdb[0];
2059 		scsi_req.cdb_len = sp_req->CdbLength;
2060 		scsi_req.direction = sp_req->Direction;
2061 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2062 		scsi_req.u_sense = &usp_req->SenseData[0];
2063 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2064 
2065 		qlnt = QLNT_PORT;
2066 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2067 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2068 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2069 		tq = ql_find_port(ha, name, qlnt);
2070 	} else {
2071 		/*
2072 		 * Must be FC PASSTHRU, verified above.
2073 		 */
2074 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2075 			qlnt = QLNT_PORT;
2076 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2077 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2078 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2079 			    ha->instance, cmd->SubCode, name[0], name[1],
2080 			    name[2], name[3], name[4], name[5], name[6],
2081 			    name[7]);
2082 			tq = ql_find_port(ha, name, qlnt);
2083 		} else if (fc_req->FCScsiAddr.DestType ==
2084 		    EXT_DEF_DESTTYPE_WWNN) {
2085 			qlnt = QLNT_NODE;
2086 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2087 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2088 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2089 			    ha->instance, cmd->SubCode, name[0], name[1],
2090 			    name[2], name[3], name[4], name[5], name[6],
2091 			    name[7]);
2092 			tq = ql_find_port(ha, name, qlnt);
2093 		} else if (fc_req->FCScsiAddr.DestType ==
2094 		    EXT_DEF_DESTTYPE_PORTID) {
2095 			qlnt = QLNT_PID;
2096 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2097 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2098 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2099 			    name[0], name[1], name[2]);
2100 			tq = ql_find_port(ha, name, qlnt);
2101 		} else {
2102 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2103 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2104 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2105 			cmd->ResponseLen = 0;
2106 			return;
2107 		}
2108 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2109 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2110 		scsi_req.cdbp = &sp_req->Cdb[0];
2111 		scsi_req.cdb_len = sp_req->CdbLength;
2112 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2113 		scsi_req.u_sense = &ufc_req->SenseData[0];
2114 		scsi_req.direction = fc_req->Direction;
2115 	}
2116 
	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
		return;
	}
2123 
	if (tq->flags & TQF_NEED_AUTHENTICATION) {
		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
		return;
	}
2130 
2131 	/* Allocate command block. */
2132 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2133 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2134 	    cmd->ResponseLen) {
2135 		pld_size = cmd->ResponseLen;
2136 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2137 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2138 		if (pkt == NULL) {
2139 			EL(ha, "failed, kmem_zalloc\n");
2140 			cmd->Status = EXT_STATUS_NO_MEMORY;
2141 			cmd->ResponseLen = 0;
2142 			return;
2143 		}
2144 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2145 
2146 		/* Get DMA memory for the IOCB */
2147 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2148 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2149 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2150 			    "alloc failed", QL_NAME, ha->instance);
2151 			kmem_free(pkt, pkt_size);
2152 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2153 			cmd->ResponseLen = 0;
2154 			return;
2155 		}
2156 
2157 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2158 			scsi_req.direction = (uint8_t)
2159 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2160 			    CF_RD : CF_DATA_IN | CF_STAG);
2161 		} else {
2162 			scsi_req.direction = (uint8_t)
2163 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2164 			    CF_WR : CF_DATA_OUT | CF_STAG);
2165 			cmd->ResponseLen = 0;
2166 
2167 			/* Get command payload. */
2168 			if (ql_get_buffer_data(
2169 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2170 			    pld, pld_size, mode) != pld_size) {
2171 				EL(ha, "failed, get_buffer_data\n");
2172 				cmd->Status = EXT_STATUS_COPY_ERR;
2173 
2174 				kmem_free(pkt, pkt_size);
2175 				ql_free_dma_resource(ha, dma_mem);
2176 				kmem_free(dma_mem, sizeof (dma_mem_t));
2177 				return;
2178 			}
2179 
			/* Copy outgoing data to the DMA buffer. */
2181 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2182 			    (uint8_t *)dma_mem->bp, pld_size,
2183 			    DDI_DEV_AUTOINCR);
2184 
2185 			/* Sync DMA buffer. */
2186 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2187 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2188 		}
2189 	} else {
2190 		scsi_req.direction = (uint8_t)
2191 		    (CFG_IST(ha, CFG_CTRL_242581) ? 0 : CF_STAG);
2192 		cmd->ResponseLen = 0;
2193 
2194 		pkt_size = sizeof (ql_mbx_iocb_t);
2195 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2196 		if (pkt == NULL) {
2197 			EL(ha, "failed, kmem_zalloc-2\n");
2198 			cmd->Status = EXT_STATUS_NO_MEMORY;
2199 			return;
2200 		}
2201 		pld = NULL;
2202 		pld_size = 0;
2203 	}
2204 
2205 	/* retries = ha->port_down_retry_count; */
2206 	retries = 1;
2207 	cmd->Status = EXT_STATUS_OK;
2208 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2209 
2210 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2211 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2212 
2213 	do {
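		/*
		 * Build the IOCB that matches the chip: command type 7 for
		 * 24xx-class adapters, type 3 when 64-bit addressing is
		 * enabled, type 2 otherwise.  Issue it through the mailbox
		 * IOCB interface and, if the port turns out to be logged
		 * out or unavailable, log back in and retry once.
		 */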
2214 		if (DRIVER_SUSPENDED(ha)) {
2215 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2216 			break;
2217 		}
2218 
2219 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2220 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2221 			pkt->cmd24.entry_count = 1;
2222 
2223 			/* Set LUN number */
2224 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2225 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2226 
2227 			/* Set N_port handle */
2228 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2229 
2230 			/* Set VP Index */
2231 			pkt->cmd24.vp_index = ha->vp_index;
2232 
2233 			/* Set target ID */
2234 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2235 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2236 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2237 
2238 			/* Set ISP command timeout. */
2239 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2240 
2241 			/* Load SCSI CDB */
2242 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2243 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2244 			    DDI_DEV_AUTOINCR);
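			/*
			 * The CDB was loaded above as a byte stream; swap
			 * each 4-byte group in place to compensate for the
			 * IOCB's little-endian 32-bit word transfer, so the
			 * firmware sees the CDB bytes in order.
			 */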
2245 			for (cnt = 0; cnt < MAX_CMDSZ;
2246 			    cnt = (uint16_t)(cnt + 4)) {
2247 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2248 				    + cnt, 4);
2249 			}
2250 
2251 			/* Set tag queue control flags */
2252 			pkt->cmd24.task = TA_STAG;
2253 
2254 			if (pld_size) {
2255 				/* Set transfer direction. */
2256 				pkt->cmd24.control_flags = scsi_req.direction;
2257 
2258 				/* Set data segment count. */
2259 				pkt->cmd24.dseg_count = LE_16(1);
2260 
2261 				/* Load total byte count. */
2262 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2263 
2264 				/* Load data descriptor. */
2265 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2266 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2267 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2268 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2269 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2270 			}
2271 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2272 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2273 			pkt->cmd3.entry_count = 1;
2274 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2275 				pkt->cmd3.target_l = LSB(tq->loop_id);
2276 				pkt->cmd3.target_h = MSB(tq->loop_id);
2277 			} else {
2278 				pkt->cmd3.target_h = LSB(tq->loop_id);
2279 			}
2280 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2281 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2282 			pkt->cmd3.control_flags_l = scsi_req.direction;
2283 			pkt->cmd3.timeout = LE_16(15);
2284 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2285 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2286 			}
2287 			if (pld_size) {
2288 				pkt->cmd3.dseg_count = LE_16(1);
2289 				pkt->cmd3.byte_count = LE_32(pld_size);
2290 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2291 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2292 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2293 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2294 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2295 			}
2296 		} else {
2297 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2298 			pkt->cmd.entry_count = 1;
2299 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2300 				pkt->cmd.target_l = LSB(tq->loop_id);
2301 				pkt->cmd.target_h = MSB(tq->loop_id);
2302 			} else {
2303 				pkt->cmd.target_h = LSB(tq->loop_id);
2304 			}
2305 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2306 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2307 			pkt->cmd.control_flags_l = scsi_req.direction;
2308 			pkt->cmd.timeout = LE_16(15);
2309 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2310 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2311 			}
2312 			if (pld_size) {
2313 				pkt->cmd.dseg_count = LE_16(1);
2314 				pkt->cmd.byte_count = LE_32(pld_size);
2315 				pkt->cmd.dseg_0_address = (uint32_t)
2316 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2317 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2318 			}
2319 		}
2320 		/* Go issue command and wait for completion. */
2321 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2322 		QL_DUMP_9(pkt, 8, pkt_size);
2323 
2324 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2325 
2326 		if (pld_size) {
			/* Sync the incoming DMA buffer. */
2328 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2329 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
			/* Copy incoming DMA data. */
2331 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2332 			    (uint8_t *)dma_mem->bp, pld_size,
2333 			    DDI_DEV_AUTOINCR);
2334 		}
2335 
2336 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2337 			pkt->sts24.entry_status = (uint8_t)
2338 			    (pkt->sts24.entry_status & 0x3c);
2339 		} else {
2340 			pkt->sts.entry_status = (uint8_t)
2341 			    (pkt->sts.entry_status & 0x7e);
2342 		}
2343 
2344 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2345 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2346 			    pkt->sts.entry_status, tq->d_id.b24);
2347 			status = QL_FUNCTION_PARAMETER_ERROR;
2348 		}
2349 
2350 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2351 		    LE_16(pkt->sts24.comp_status) :
2352 		    LE_16(pkt->sts.comp_status));
2353 
2354 		/*
2355 		 * We have verified about all the request that can be so far.
2356 		 * Now we need to start verification of our ability to
2357 		 * actually issue the CDB.
2358 		 */
2359 		if (DRIVER_SUSPENDED(ha)) {
2360 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2361 			break;
2362 		} else if (status == QL_SUCCESS &&
2363 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2364 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2365 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2366 			if (tq->flags & TQF_FABRIC_DEVICE) {
2367 				rval = ql_login_fport(ha, tq, tq->loop_id,
2368 				    LFF_NO_PLOGI, &mr);
2369 				if (rval != QL_SUCCESS) {
2370 					EL(ha, "failed, login_fport=%xh, "
2371 					    "d_id=%xh\n", rval, tq->d_id.b24);
2372 				}
2373 			} else {
2374 				rval = ql_login_lport(ha, tq, tq->loop_id,
2375 				    LLF_NONE);
2376 				if (rval != QL_SUCCESS) {
2377 					EL(ha, "failed, login_lport=%xh, "
2378 					    "d_id=%xh\n", rval, tq->d_id.b24);
2379 				}
2380 			}
2381 		} else {
2382 			break;
2383 		}
2384 
2385 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2386 
2387 	} while (retries--);
2388 
2389 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2390 		/* Cannot issue command now, maybe later */
2391 		EL(ha, "failed, suspended\n");
2392 		kmem_free(pkt, pkt_size);
2393 		ql_free_dma_resource(ha, dma_mem);
2394 		kmem_free(dma_mem, sizeof (dma_mem_t));
2395 		cmd->Status = EXT_STATUS_SUSPENDED;
2396 		cmd->ResponseLen = 0;
2397 		return;
2398 	}
2399 
2400 	if (status != QL_SUCCESS) {
2401 		/* Command error */
2402 		EL(ha, "failed, I/O\n");
2403 		kmem_free(pkt, pkt_size);
2404 		ql_free_dma_resource(ha, dma_mem);
2405 		kmem_free(dma_mem, sizeof (dma_mem_t));
2406 		cmd->Status = EXT_STATUS_ERR;
2407 		cmd->DetailStatus = status;
2408 		cmd->ResponseLen = 0;
2409 		return;
2410 	}
2411 
2412 	/* Setup status. */
2413 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2414 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2415 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2416 
2417 		/* Setup residuals. */
2418 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2419 
2420 		/* Setup state flags. */
2421 		sts.state_flags_l = pkt->sts24.state_flags_l;
2422 		sts.state_flags_h = pkt->sts24.state_flags_h;
2423 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2424 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2425 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2426 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2427 		} else {
2428 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2429 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2430 			    SF_GOT_STATUS);
2431 		}
2432 		if (scsi_req.direction & CF_WR) {
2433 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2434 			    SF_DATA_OUT);
2435 		} else if (scsi_req.direction & CF_RD) {
2436 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2437 			    SF_DATA_IN);
2438 		}
2439 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2440 
2441 		/* Setup FCP response info. */
2442 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2443 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2444 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2445 		for (cnt = 0; cnt < sts.rsp_info_length;
2446 		    cnt = (uint16_t)(cnt + 4)) {
2447 			ql_chg_endian(sts.rsp_info + cnt, 4);
2448 		}
2449 
2450 		/* Setup sense data. */
2451 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2452 			sts.req_sense_length =
2453 			    LE_32(pkt->sts24.fcp_sense_length);
2454 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2455 			    SF_ARQ_DONE);
2456 		} else {
2457 			sts.req_sense_length = 0;
2458 		}
2459 		sts.req_sense_data =
2460 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2461 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2462 		    (uintptr_t)sts.req_sense_data);
2463 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2464 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2465 		}
2466 	} else {
2467 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2468 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2469 
2470 		/* Setup residuals. */
2471 		sts.residual_length = LE_32(pkt->sts.residual_length);
2472 
2473 		/* Setup state flags. */
2474 		sts.state_flags_l = pkt->sts.state_flags_l;
2475 		sts.state_flags_h = pkt->sts.state_flags_h;
2476 
2477 		/* Setup FCP response info. */
2478 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2479 		    LE_16(pkt->sts.rsp_info_length) : 0;
2480 		sts.rsp_info = &pkt->sts.rsp_info[0];
2481 
2482 		/* Setup sense data. */
2483 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2484 		    LE_16(pkt->sts.req_sense_length) : 0;
2485 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2486 	}
2487 
2488 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2489 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2490 
2491 	switch (sts.comp_status) {
2492 	case CS_INCOMPLETE:
2493 	case CS_ABORTED:
2494 	case CS_DEVICE_UNAVAILABLE:
2495 	case CS_PORT_UNAVAILABLE:
2496 	case CS_PORT_LOGGED_OUT:
2497 	case CS_PORT_CONFIG_CHG:
2498 	case CS_PORT_BUSY:
2499 	case CS_LOOP_DOWN_ABORT:
2500 		cmd->Status = EXT_STATUS_BUSY;
2501 		break;
2502 	case CS_RESET:
2503 	case CS_QUEUE_FULL:
2504 		cmd->Status = EXT_STATUS_ERR;
2505 		break;
2506 	case CS_TIMEOUT:
2507 		cmd->Status = EXT_STATUS_ERR;
2508 		break;
2509 	case CS_DATA_OVERRUN:
2510 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2511 		break;
2512 	case CS_DATA_UNDERRUN:
2513 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2514 		break;
2515 	}
2516 
2517 	/*
2518 	 * If non data transfer commands fix tranfer counts.
2519 	 */
2520 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2521 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2522 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2523 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2524 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2525 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2526 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2527 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2528 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2529 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2530 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2531 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2532 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2533 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2534 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2535 		/*
2536 		 * Non data transfer command, clear sts_entry residual
2537 		 * length.
2538 		 */
2539 		sts.residual_length = 0;
2540 		cmd->ResponseLen = 0;
2541 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2542 			sts.comp_status = CS_COMPLETE;
2543 			cmd->Status = EXT_STATUS_OK;
2544 		}
2545 	} else {
2546 		cmd->ResponseLen = pld_size;
2547 	}
2548 
2549 	/* Correct ISP completion status */
2550 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2551 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2552 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2553 		    ha->instance);
2554 		scsi_req.resid = 0;
2555 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2556 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2557 		    ha->instance);
2558 		scsi_req.resid = sts.residual_length;
2559 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2560 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2561 
2562 			cmd->ResponseLen = (uint32_t)
2563 			    (pld_size - scsi_req.resid);
2564 		} else {
2565 			EL(ha, "failed, Transfer ERROR\n");
2566 			cmd->Status = EXT_STATUS_ERR;
2567 			cmd->ResponseLen = 0;
2568 		}
2569 	} else {
2570 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2571 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2572 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2573 		    sts.scsi_status_l);
2574 
2575 		scsi_req.resid = pld_size;
2576 		/*
2577 		 * Handle residual count on SCSI check
2578 		 * condition.
2579 		 *
2580 		 * - If Residual Under / Over is set, use the
2581 		 *   Residual Transfer Length field in IOCB.
2582 		 * - If Residual Under / Over is not set, and
2583 		 *   Transferred Data bit is set in State Flags
2584 		 *   field of IOCB, report residual value of 0
2585 		 *   (you may want to do this for tape
2586 		 *   Write-type commands only). This takes care
2587 		 *   of logical end of tape problem and does
2588 		 *   not break Unit Attention.
2589 		 * - If Residual Under / Over is not set, and
2590 		 *   Transferred Data bit is not set in State
2591 		 *   Flags, report residual value equal to
2592 		 *   original data transfer length.
2593 		 */
2594 		if (sts.scsi_status_l & STATUS_CHECK) {
2595 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2596 			cmd->DetailStatus = sts.scsi_status_l;
2597 			if (sts.scsi_status_h &
2598 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2599 				scsi_req.resid = sts.residual_length;
2600 			} else if (sts.state_flags_h &
2601 			    STATE_XFERRED_DATA) {
2602 				scsi_req.resid = 0;
2603 			}
2604 		}
2605 	}
2606 
2607 	if (sts.scsi_status_l & STATUS_CHECK &&
2608 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2609 	    sts.req_sense_length) {
2610 		/*
2611 		 * Check condition with vaild sense data flag set and sense
2612 		 * length != 0
2613 		 */
2614 		if (sts.req_sense_length > scsi_req.sense_length) {
2615 			sense_sz = scsi_req.sense_length;
2616 		} else {
2617 			sense_sz = sts.req_sense_length;
2618 		}
2619 
2620 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2621 		    tq->d_id.b24);
2622 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2623 
2624 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2625 		    (size_t)sense_sz, mode) != 0) {
2626 			EL(ha, "failed, request sense ddi_copyout\n");
2627 		}
2628 
2629 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2630 		cmd->DetailStatus = sts.scsi_status_l;
2631 	}
2632 
2633 	/* Copy response payload from DMA buffer to application. */
2634 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2635 	    cmd->ResponseLen != 0) {
2636 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2637 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2638 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2639 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2640 
2641 		/* Send response payload. */
2642 		if (ql_send_buffer_data(pld,
2643 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2644 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2645 			EL(ha, "failed, send_buffer_data\n");
2646 			cmd->Status = EXT_STATUS_COPY_ERR;
2647 			cmd->ResponseLen = 0;
2648 		}
2649 	}
2650 
2651 	if (cmd->Status != EXT_STATUS_OK) {
2652 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2653 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2654 	} else {
2655 		/*EMPTY*/
2656 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2657 		    ha->instance, cmd->ResponseLen);
2658 	}
2659 
2660 	kmem_free(pkt, pkt_size);
2661 	ql_free_dma_resource(ha, dma_mem);
2662 	kmem_free(dma_mem, sizeof (dma_mem_t));
2663 }
2664 
2665 /*
2666  * ql_wwpn_to_scsiaddr
2667  *
2668  * Input:
2669  *	ha:	adapter state pointer.
2670  *	cmd:	EXT_IOCTL cmd struct pointer.
2671  *	mode:	flags.
2672  *
2673  * Context:
2674  *	Kernel context.
2675  */
2676 static void
2677 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2678 {
2679 	int		status;
2680 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2681 	EXT_SCSI_ADDR	*tmp_addr;
2682 	ql_tgt_t	*tq;
2683 
2684 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2685 
2686 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2687 		/* Return error */
2688 		EL(ha, "incorrect RequestLen\n");
2689 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2690 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2691 		return;
2692 	}
2693 
2694 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2695 	    cmd->RequestLen, mode);
2696 
2697 	if (status != 0) {
2698 		cmd->Status = EXT_STATUS_COPY_ERR;
2699 		EL(ha, "failed, ddi_copyin\n");
2700 		return;
2701 	}
2702 
2703 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2704 
2705 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2706 		/* no matching device */
2707 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2708 		EL(ha, "failed, device not found\n");
2709 		return;
2710 	}
2711 
2712 	/* Copy out the IDs found.  For now we can only return target ID. */
2713 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2714 
2715 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2716 
2717 	if (status != 0) {
2718 		cmd->Status = EXT_STATUS_COPY_ERR;
2719 		EL(ha, "failed, ddi_copyout\n");
2720 	} else {
2721 		cmd->Status = EXT_STATUS_OK;
2722 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2723 	}
2724 }
2725 
2726 /*
2727  * ql_host_idx
2728  *	Gets host order index.
2729  *
2730  * Input:
2731  *	ha:	adapter state pointer.
2732  *	cmd:	EXT_IOCTL cmd struct pointer.
2733  *	mode:	flags.
2734  *
2735  * Returns:
2736  *	None, request status indicated in cmd->Status.
2737  *
2738  * Context:
2739  *	Kernel context.
2740  */
2741 static void
2742 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2743 {
2744 	uint16_t	idx;
2745 
2746 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2747 
2748 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2749 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2750 		cmd->DetailStatus = sizeof (uint16_t);
2751 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2752 		cmd->ResponseLen = 0;
2753 		return;
2754 	}
2755 
2756 	idx = (uint16_t)ha->instance;
2757 
2758 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2759 	    sizeof (uint16_t), mode) != 0) {
2760 		cmd->Status = EXT_STATUS_COPY_ERR;
2761 		cmd->ResponseLen = 0;
2762 		EL(ha, "failed, ddi_copyout\n");
2763 	} else {
2764 		cmd->ResponseLen = sizeof (uint16_t);
2765 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2766 	}
2767 }
2768 
2769 /*
2770  * ql_host_drvname
2771  *	Gets host driver name
2772  *
2773  * Input:
2774  *	ha:	adapter state pointer.
2775  *	cmd:	EXT_IOCTL cmd struct pointer.
2776  *	mode:	flags.
2777  *
2778  * Returns:
2779  *	None, request status indicated in cmd->Status.
2780  *
2781  * Context:
2782  *	Kernel context.
2783  */
2784 static void
2785 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2786 {
2787 
2788 	char		drvname[] = QL_NAME;
2789 	uint32_t	qlnamelen;
2790 
2791 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2792 
	qlnamelen = (uint32_t)(strlen(QL_NAME) + 1);
2794 
2795 	if (cmd->ResponseLen < qlnamelen) {
2796 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2797 		cmd->DetailStatus = qlnamelen;
2798 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2799 		    cmd->ResponseLen, qlnamelen);
2800 		cmd->ResponseLen = 0;
2801 		return;
2802 	}
2803 
2804 	if (ddi_copyout((void *)&drvname,
2805 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2806 	    qlnamelen, mode) != 0) {
2807 		cmd->Status = EXT_STATUS_COPY_ERR;
2808 		cmd->ResponseLen = 0;
2809 		EL(ha, "failed, ddi_copyout\n");
2810 	} else {
		cmd->ResponseLen = qlnamelen - 1;
2812 	}
2813 
2814 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2815 }
2816 
2817 /*
2818  * ql_read_nvram
2819  *	Get NVRAM contents.
2820  *
2821  * Input:
2822  *	ha:	adapter state pointer.
2823  *	cmd:	EXT_IOCTL cmd struct pointer.
2824  *	mode:	flags.
2825  *
2826  * Returns:
2827  *	None, request status indicated in cmd->Status.
2828  *
2829  * Context:
2830  *	Kernel context.
2831  */
2832 static void
2833 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2834 {
2835 	uint32_t	nv_size;
2836 
2837 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2838 
2839 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2840 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2841 	if (cmd->ResponseLen < nv_size) {
2842 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2843 		cmd->DetailStatus = nv_size;
2844 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2845 		    cmd->ResponseLen);
2846 		cmd->ResponseLen = 0;
2847 		return;
2848 	}
2849 
2850 	/* Get NVRAM data. */
2851 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2852 	    mode) != 0) {
2853 		cmd->Status = EXT_STATUS_COPY_ERR;
2854 		cmd->ResponseLen = 0;
2855 		EL(ha, "failed, copy error\n");
2856 	} else {
2857 		cmd->ResponseLen = nv_size;
2858 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2859 	}
2860 }
2861 
2862 /*
2863  * ql_write_nvram
2864  *	Loads NVRAM contents.
2865  *
2866  * Input:
2867  *	ha:	adapter state pointer.
2868  *	cmd:	EXT_IOCTL cmd struct pointer.
2869  *	mode:	flags.
2870  *
2871  * Returns:
2872  *	None, request status indicated in cmd->Status.
2873  *
2874  * Context:
2875  *	Kernel context.
2876  */
2877 static void
2878 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2879 {
2880 	uint32_t	nv_size;
2881 
2882 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2883 
2884 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2885 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2886 	if (cmd->RequestLen < nv_size) {
2887 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = nv_size;
2889 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2890 		    cmd->RequestLen);
2891 		return;
2892 	}
2893 
2894 	/* Load NVRAM data. */
2895 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2896 	    mode) != 0) {
2897 		cmd->Status = EXT_STATUS_COPY_ERR;
2898 		EL(ha, "failed, copy error\n");
2899 	} else {
2900 		/*EMPTY*/
2901 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2902 	}
2903 }
2904 
2905 /*
2906  * ql_write_vpd
2907  *	Loads VPD contents.
2908  *
2909  * Input:
2910  *	ha:	adapter state pointer.
2911  *	cmd:	EXT_IOCTL cmd struct pointer.
2912  *	mode:	flags.
2913  *
2914  * Returns:
2915  *	None, request status indicated in cmd->Status.
2916  *
2917  * Context:
2918  *	Kernel context.
2919  */
2920 static void
2921 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2922 {
2923 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2924 
2925 	int32_t		rval = 0;
2926 
2927 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2928 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2929 		EL(ha, "failed, invalid request for HBA\n");
2930 		return;
2931 	}
2932 
2933 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2934 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2935 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2936 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2937 		    cmd->RequestLen);
2938 		return;
2939 	}
2940 
2941 	/* Load VPD data. */
2942 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2943 	    mode)) != 0) {
2944 		cmd->Status = EXT_STATUS_COPY_ERR;
2945 		cmd->DetailStatus = rval;
2946 		EL(ha, "failed, errno=%x\n", rval);
2947 	} else {
2948 		/*EMPTY*/
2949 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2950 	}
2951 }
2952 
2953 /*
2954  * ql_read_vpd
2955  *	Dumps VPD contents.
2956  *
2957  * Input:
2958  *	ha:	adapter state pointer.
2959  *	cmd:	EXT_IOCTL cmd struct pointer.
2960  *	mode:	flags.
2961  *
2962  * Returns:
2963  *	None, request status indicated in cmd->Status.
2964  *
2965  * Context:
2966  *	Kernel context.
2967  */
2968 static void
2969 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2970 {
2971 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2972 
2973 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2974 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2975 		EL(ha, "failed, invalid request for HBA\n");
2976 		return;
2977 	}
2978 
2979 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2980 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2981 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2982 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2983 		    cmd->ResponseLen);
2984 		return;
2985 	}
2986 
2987 	/* Dump VPD data. */
2988 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2989 	    mode)) != 0) {
2990 		cmd->Status = EXT_STATUS_COPY_ERR;
2991 		EL(ha, "failed,\n");
2992 	} else {
2993 		/*EMPTY*/
2994 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2995 	}
2996 }
2997 
2998 /*
2999  * ql_get_fcache
3000  *	Dumps flash cache contents.
3001  *
3002  * Input:
3003  *	ha:	adapter state pointer.
3004  *	cmd:	EXT_IOCTL cmd struct pointer.
3005  *	mode:	flags.
3006  *
3007  * Returns:
3008  *	None, request status indicated in cmd->Status.
3009  *
3010  * Context:
3011  *	Kernel context.
3012  */
3013 static void
3014 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3015 {
3016 	uint32_t	bsize, boff, types, cpsize, hsize;
3017 	ql_fcache_t	*fptr;
3018 
3019 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3020 
3021 	CACHE_LOCK(ha);
3022 
3023 	if (ha->fcache == NULL) {
3024 		CACHE_UNLOCK(ha);
3025 		cmd->Status = EXT_STATUS_ERR;
3026 		EL(ha, "failed, adapter fcache not setup\n");
3027 		return;
3028 	}
3029 
3030 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
3031 		bsize = 100;
3032 	} else {
3033 		bsize = 400;
3034 	}
3035 
3036 	if (cmd->ResponseLen < bsize) {
3037 		CACHE_UNLOCK(ha);
3038 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3039 		cmd->DetailStatus = bsize;
3040 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3041 		    bsize, cmd->ResponseLen);
3042 		return;
3043 	}
3044 
3045 	boff = 0;
3046 	bsize = 0;
3047 	fptr = ha->fcache;
3048 
3049 	/*
3050 	 * For backwards compatibility, get one of each image type
3051 	 */
3052 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
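	/*
	 * Legacy layout: each image type found gets a fixed 100-byte slot
	 * in the response buffer, and the (header-stripped) firmware image
	 * is copied last at offset 300.
	 */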
3053 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3054 		/* Get the next image */
3055 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3056 
3057 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3058 
3059 			if (ddi_copyout(fptr->buf,
3060 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3061 			    cpsize, mode) != 0) {
3062 				CACHE_UNLOCK(ha);
3063 				EL(ha, "ddicopy failed, done\n");
3064 				cmd->Status = EXT_STATUS_COPY_ERR;
3065 				cmd->DetailStatus = 0;
3066 				return;
3067 			}
3068 			boff += 100;
3069 			bsize += cpsize;
3070 			types &= ~(fptr->type);
3071 		}
3072 	}
3073 
3074 	/*
3075 	 * Get the firmware image -- it needs to be last in the
3076 	 * buffer at offset 300 for backwards compatibility. Also for
3077 	 * backwards compatibility, the pci header is stripped off.
3078 	 */
3079 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3080 
3081 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3082 		if (hsize > fptr->buflen) {
3083 			CACHE_UNLOCK(ha);
3084 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3085 			    hsize, fptr->buflen);
3086 			cmd->Status = EXT_STATUS_COPY_ERR;
3087 			cmd->DetailStatus = 0;
3088 			return;
3089 		}
3090 
3091 		cpsize = ((fptr->buflen - hsize) < 100 ?
3092 		    fptr->buflen - hsize : 100);
3093 
3094 		if (ddi_copyout(fptr->buf+hsize,
3095 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3096 		    cpsize, mode) != 0) {
3097 			CACHE_UNLOCK(ha);
3098 			EL(ha, "fw ddicopy failed, done\n");
3099 			cmd->Status = EXT_STATUS_COPY_ERR;
3100 			cmd->DetailStatus = 0;
3101 			return;
3102 		}
3103 		bsize += 100;
3104 	}
3105 
3106 	CACHE_UNLOCK(ha);
3107 	cmd->Status = EXT_STATUS_OK;
3108 	cmd->DetailStatus = bsize;
3109 
3110 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3111 }
3112 
3113 /*
3114  * ql_get_fcache_ex
3115  *	Dumps flash cache contents.
3116  *
3117  * Input:
3118  *	ha:	adapter state pointer.
3119  *	cmd:	EXT_IOCTL cmd struct pointer.
3120  *	mode:	flags.
3121  *
3122  * Returns:
3123  *	None, request status indicated in cmd->Status.
3124  *
3125  * Context:
3126  *	Kernel context.
3127  */
3128 static void
3129 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3130 {
3131 	uint32_t	bsize = 0;
3132 	uint32_t	boff = 0;
3133 	ql_fcache_t	*fptr;
3134 
3135 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3136 
3137 	CACHE_LOCK(ha);
3138 	if (ha->fcache == NULL) {
3139 		CACHE_UNLOCK(ha);
3140 		cmd->Status = EXT_STATUS_ERR;
3141 		EL(ha, "failed, adapter fcache not setup\n");
3142 		return;
3143 	}
3144 
3145 	/* Make sure user passed enough buffer space */
3146 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3147 		bsize += FBUFSIZE;
3148 	}
3149 
3150 	if (cmd->ResponseLen < bsize) {
3151 		CACHE_UNLOCK(ha);
3152 		if (cmd->ResponseLen != 0) {
3153 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3154 			    bsize, cmd->ResponseLen);
3155 		}
3156 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3157 		cmd->DetailStatus = bsize;
3158 		return;
3159 	}
3160 
3161 	boff = 0;
3162 	fptr = ha->fcache;
3163 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3164 		/* Get the next image */
3165 		if (ddi_copyout(fptr->buf,
3166 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3167 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3168 		    mode) != 0) {
3169 			CACHE_UNLOCK(ha);
3170 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3171 			cmd->Status = EXT_STATUS_COPY_ERR;
3172 			cmd->DetailStatus = 0;
3173 			return;
3174 		}
3175 		boff += FBUFSIZE;
3176 		fptr = fptr->next;
3177 	}
3178 
3179 	CACHE_UNLOCK(ha);
3180 	cmd->Status = EXT_STATUS_OK;
3181 	cmd->DetailStatus = bsize;
3182 
3183 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3184 }
3185 
3186 /*
3187  * ql_read_flash
3188  *	Get flash contents.
3189  *
3190  * Input:
3191  *	ha:	adapter state pointer.
3192  *	cmd:	EXT_IOCTL cmd struct pointer.
3193  *	mode:	flags.
3194  *
3195  * Returns:
3196  *	None, request status indicated in cmd->Status.
3197  *
3198  * Context:
3199  *	Kernel context.
3200  */
3201 static void
3202 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3203 {
3204