1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
91 
92 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
94 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
95 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
96     uint8_t);
97 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
99 static int ql_24xx_flash_desc(ql_adapter_state_t *);
100 static int ql_setup_flash(ql_adapter_state_t *);
101 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
102 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
103 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
104     uint32_t, int);
105 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
106     uint8_t);
107 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
110 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
116 static uint32_t ql_setup_led(ql_adapter_state_t *);
117 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
118 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
122 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
123 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
125 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
126 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
127 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
128 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
129 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
130 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
132 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
140 static void ql_restart_hba(ql_adapter_state_t *);
141 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
144 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
145 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
146 static void ql_update_flash_caches(ql_adapter_state_t *);
147 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
149 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
150 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
152 
153 /* ******************************************************************** */
154 /*			External IOCTL support.				*/
155 /* ******************************************************************** */
156 
157 /*
158  * ql_alloc_xioctl_resource
159  *	Allocates resources needed by module code.
160  *
161  * Input:
162  *	ha:		adapter state pointer.
163  *
164  * Returns:
165  *	SYS_ERRNO
166  *
167  * Context:
168  *	Kernel context.
169  */
170 int
171 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
172 {
173 	ql_xioctl_t	*xp;
174 
175 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
176 
177 	if (ha->xioctl != NULL) {
178 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
179 		    ha->instance);
180 		return (0);
181 	}
182 
183 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
184 	if (xp == NULL) {
185 		EL(ha, "failed, kmem_zalloc\n");
186 		return (ENOMEM);
187 	}
188 	ha->xioctl = xp;
189 
190 	/* Allocate AEN tracking buffer */
191 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
192 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
193 	if (xp->aen_tracking_queue == NULL) {
194 		EL(ha, "failed, kmem_zalloc-2\n");
195 		ql_free_xioctl_resource(ha);
196 		return (ENOMEM);
197 	}
198 
199 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
200 
201 	return (0);
202 }
203 
204 /*
205  * ql_free_xioctl_resource
206  *	Frees resources used by module code.
207  *
208  * Input:
209  *	ha:		adapter state pointer.
210  *
211  * Context:
212  *	Kernel context.
213  */
214 void
215 ql_free_xioctl_resource(ql_adapter_state_t *ha)
216 {
217 	ql_xioctl_t	*xp = ha->xioctl;
218 
219 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
220 
221 	if (xp == NULL) {
222 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
223 		return;
224 	}
225 
226 	if (xp->aen_tracking_queue != NULL) {
227 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
228 		    sizeof (EXT_ASYNC_EVENT));
229 		xp->aen_tracking_queue = NULL;
230 	}
231 
232 	kmem_free(xp, sizeof (ql_xioctl_t));
233 	ha->xioctl = NULL;
234 
235 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
236 }
237 
238 /*
239  * ql_xioctl
240  *	External IOCTL processing.
241  *
242  * Input:
243  *	ha:	adapter state pointer.
244  *	cmd:	function to perform
245  *	arg:	data type varies with request
246  *	mode:	flags
247  *	cred_p:	credentials pointer
248  *	rval_p:	pointer to result value
249  *
250  * Returns:
251  *	0:		success
252  *	ENXIO:		No such device or address
253  *	ENOPROTOOPT:	Protocol not available
254  *
255  * Context:
256  *	Kernel context.
257  */
258 /* ARGSUSED */
259 int
260 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
261     cred_t *cred_p, int *rval_p)
262 {
263 	int	rval;
264 
265 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
266 
267 	if (ha->xioctl == NULL) {
268 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
269 		return (ENXIO);
270 	}
271 
272 	switch (cmd) {
273 	case EXT_CC_QUERY:
274 	case EXT_CC_SEND_FCCT_PASSTHRU:
275 	case EXT_CC_REG_AEN:
276 	case EXT_CC_GET_AEN:
277 	case EXT_CC_SEND_SCSI_PASSTHRU:
278 	case EXT_CC_WWPN_TO_SCSIADDR:
279 	case EXT_CC_SEND_ELS_RNID:
280 	case EXT_CC_SET_DATA:
281 	case EXT_CC_GET_DATA:
282 	case EXT_CC_HOST_IDX:
283 	case EXT_CC_READ_NVRAM:
284 	case EXT_CC_UPDATE_NVRAM:
285 	case EXT_CC_READ_OPTION_ROM:
286 	case EXT_CC_READ_OPTION_ROM_EX:
287 	case EXT_CC_UPDATE_OPTION_ROM:
288 	case EXT_CC_UPDATE_OPTION_ROM_EX:
289 	case EXT_CC_GET_VPD:
290 	case EXT_CC_SET_VPD:
291 	case EXT_CC_LOOPBACK:
292 	case EXT_CC_GET_FCACHE:
293 	case EXT_CC_GET_FCACHE_EX:
294 	case EXT_CC_HOST_DRVNAME:
295 	case EXT_CC_GET_SFP_DATA:
296 	case EXT_CC_PORT_PARAM:
297 	case EXT_CC_GET_PCI_DATA:
298 	case EXT_CC_GET_FWEXTTRACE:
299 	case EXT_CC_GET_FWFCETRACE:
300 	case EXT_CC_GET_VP_CNT_ID:
301 	case EXT_CC_VPORT_CMD:
302 	case EXT_CC_ACCESS_FLASH:
303 	case EXT_CC_RESET_FW:
304 	case EXT_CC_MENLO_MANAGE_INFO:
305 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
306 		break;
307 	default:
308 		/* function not supported. */
309 		EL(ha, "function=%d not supported\n", cmd);
310 		rval = ENOPROTOOPT;
311 	}
312 
313 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
314 
315 	return (rval);
316 }
317 
318 /*
319  * ql_sdm_ioctl
320  *	Provides ioctl functions for SAN/Device Management functions
321  *	AKA External Ioctl functions.
322  *
323  * Input:
324  *	ha:		adapter state pointer.
325  *	ioctl_code:	ioctl function to perform
326  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
327  *	mode:		flags
328  *
329  * Returns:
330  *	0:	success
331  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
332  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
333  *		copyout of EXT_IOCTL status info failed.
334  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
335  *	EBUSY:	Device busy
336  *
337  * Context:
338  *	Kernel context.
339  */
340 static int
341 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
342 {
343 	EXT_IOCTL		*cmd;
344 	int			rval;
345 	ql_adapter_state_t	*vha;
346 
347 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
348 
349 	/* Copy argument structure (EXT_IOCTL) from application land. */
350 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
351 	    ql_validate_signature)) != 0) {
352 		/*
353 		 * a non-zero value at this time means a problem getting
354 		 * the requested information from application land, just
355 		 * return the error code and hope for the best.
356 		 */
357 		EL(ha, "failed, sdm_setup\n");
358 		return (rval);
359 	}
360 
361 	/*
362 	 * Map the physical ha ptr (which the ioctl is called with)
363 	 * to the virtual ha that the caller is addressing.
364 	 */
365 	if (ha->flags & VP_ENABLED) {
366 		/* Check that it is within range. */
367 		if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
368 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
369 			EL(ha, "Invalid HbaSelect vp index: %xh\n",
370 			    cmd->HbaSelect);
371 			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
372 			cmd->ResponseLen = 0;
373 			return (rval);
374 		}
375 		/*
376 		 * Special case: HbaSelect == 0 is physical ha
377 		 */
378 		if (cmd->HbaSelect != 0) {
379 			vha = ha->vp_next;
380 			while (vha != NULL) {
381 				if (vha->vp_index == cmd->HbaSelect) {
382 					ha = vha;
383 					break;
384 				}
385 				vha = vha->vp_next;
386 			}
387 			/*
388 			 * The specified vp index may be valid(within range)
389 			 * but it's not in the list. Currently this is all
390 			 * we can say.
391 			 */
392 			if (vha == NULL) {
393 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
394 				cmd->ResponseLen = 0;
395 				return (rval);
396 			}
397 		}
398 	}
399 
400 	/*
401 	 * If driver is suspended, stalled, or powered down rtn BUSY
402 	 */
403 	if (ha->flags & ADAPTER_SUSPENDED ||
404 	    ha->task_daemon_flags & DRIVER_STALL ||
405 	    ha->power_level != PM_LEVEL_D0) {
406 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
407 		    "driver suspended" :
408 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
409 		    "FCA powered down"));
410 		cmd->Status = EXT_STATUS_BUSY;
411 		cmd->ResponseLen = 0;
412 		rval = EBUSY;
413 
414 		/* Return results to caller */
415 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
416 			EL(ha, "failed, sdm_return\n");
417 			rval = EFAULT;
418 		}
419 		return (rval);
420 	}
421 
422 	switch (ioctl_code) {
423 	case EXT_CC_QUERY_OS:
424 		ql_query(ha, cmd, mode);
425 		break;
426 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
427 		ql_fcct(ha, cmd, mode);
428 		break;
429 	case EXT_CC_REG_AEN_OS:
430 		ql_aen_reg(ha, cmd, mode);
431 		break;
432 	case EXT_CC_GET_AEN_OS:
433 		ql_aen_get(ha, cmd, mode);
434 		break;
435 	case EXT_CC_GET_DATA_OS:
436 		ql_get_host_data(ha, cmd, mode);
437 		break;
438 	case EXT_CC_SET_DATA_OS:
439 		ql_set_host_data(ha, cmd, mode);
440 		break;
441 	case EXT_CC_SEND_ELS_RNID_OS:
442 		ql_send_els_rnid(ha, cmd, mode);
443 		break;
444 	case EXT_CC_SCSI_PASSTHRU_OS:
445 		ql_scsi_passthru(ha, cmd, mode);
446 		break;
447 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
448 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
449 		break;
450 	case EXT_CC_HOST_IDX_OS:
451 		ql_host_idx(ha, cmd, mode);
452 		break;
453 	case EXT_CC_HOST_DRVNAME_OS:
454 		ql_host_drvname(ha, cmd, mode);
455 		break;
456 	case EXT_CC_READ_NVRAM_OS:
457 		ql_read_nvram(ha, cmd, mode);
458 		break;
459 	case EXT_CC_UPDATE_NVRAM_OS:
460 		ql_write_nvram(ha, cmd, mode);
461 		break;
462 	case EXT_CC_READ_OPTION_ROM_OS:
463 	case EXT_CC_READ_OPTION_ROM_EX_OS:
464 		ql_read_flash(ha, cmd, mode);
465 		break;
466 	case EXT_CC_UPDATE_OPTION_ROM_OS:
467 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
468 		ql_write_flash(ha, cmd, mode);
469 		break;
470 	case EXT_CC_LOOPBACK_OS:
471 		ql_diagnostic_loopback(ha, cmd, mode);
472 		break;
473 	case EXT_CC_GET_VPD_OS:
474 		ql_read_vpd(ha, cmd, mode);
475 		break;
476 	case EXT_CC_SET_VPD_OS:
477 		ql_write_vpd(ha, cmd, mode);
478 		break;
479 	case EXT_CC_GET_FCACHE_OS:
480 		ql_get_fcache(ha, cmd, mode);
481 		break;
482 	case EXT_CC_GET_FCACHE_EX_OS:
483 		ql_get_fcache_ex(ha, cmd, mode);
484 		break;
485 	case EXT_CC_GET_SFP_DATA_OS:
486 		ql_get_sfp(ha, cmd, mode);
487 		break;
488 	case EXT_CC_PORT_PARAM_OS:
489 		ql_port_param(ha, cmd, mode);
490 		break;
491 	case EXT_CC_GET_PCI_DATA_OS:
492 		ql_get_pci_data(ha, cmd, mode);
493 		break;
494 	case EXT_CC_GET_FWEXTTRACE_OS:
495 		ql_get_fwexttrace(ha, cmd, mode);
496 		break;
497 	case EXT_CC_GET_FWFCETRACE_OS:
498 		ql_get_fwfcetrace(ha, cmd, mode);
499 		break;
500 	case EXT_CC_MENLO_RESET:
501 		ql_menlo_reset(ha, cmd, mode);
502 		break;
503 	case EXT_CC_MENLO_GET_FW_VERSION:
504 		ql_menlo_get_fw_version(ha, cmd, mode);
505 		break;
506 	case EXT_CC_MENLO_UPDATE_FW:
507 		ql_menlo_update_fw(ha, cmd, mode);
508 		break;
509 	case EXT_CC_MENLO_MANAGE_INFO:
510 		ql_menlo_manage_info(ha, cmd, mode);
511 		break;
512 	case EXT_CC_GET_VP_CNT_ID_OS:
513 		ql_get_vp_cnt_id(ha, cmd, mode);
514 		break;
515 	case EXT_CC_VPORT_CMD_OS:
516 		ql_vp_ioctl(ha, cmd, mode);
517 		break;
518 	case EXT_CC_ACCESS_FLASH_OS:
519 		ql_access_flash(ha, cmd, mode);
520 		break;
521 	case EXT_CC_RESET_FW_OS:
522 		ql_reset_cmd(ha, cmd);
523 		break;
524 	default:
525 		/* function not supported. */
526 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
527 
528 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
529 		cmd->ResponseLen = 0;
530 		break;
531 	}
532 
533 	/* Return results to caller */
534 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
535 		EL(ha, "failed, sdm_return\n");
536 		return (EFAULT);
537 	}
538 
539 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
540 
541 	return (0);
542 }
543 
544 /*
545  * ql_sdm_setup
546  *	Make a local copy of the EXT_IOCTL struct and validate it.
547  *
548  * Input:
549  *	ha:		adapter state pointer.
550  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
551  *	arg:		Address of application EXT_IOCTL cmd data
552  *	mode:		flags
553  *	val_sig:	Pointer to a function to validate the ioctl signature.
554  *
555  * Returns:
556  *	0:		success
557  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
558  *	EINVAL:		Invalid version, signature.
559  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
560  *
561  * Context:
562  *	Kernel context.
563  */
564 static int
565 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
566     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
567 {
568 	int		rval;
569 	EXT_IOCTL	*cmd;
570 
571 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
572 
573 	/* Allocate local memory for EXT_IOCTL. */
574 	*cmd_struct = NULL;
575 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
576 	if (cmd == NULL) {
577 		EL(ha, "failed, kmem_zalloc\n");
578 		return (ENOMEM);
579 	}
580 	/* Get argument structure. */
581 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
582 	if (rval != 0) {
583 		EL(ha, "failed, ddi_copyin\n");
584 		rval = EFAULT;
585 	} else {
586 		/*
587 		 * Check signature and the version.
588 		 * If either are not valid then neither is the
589 		 * structure so don't attempt to return any error status
590 		 * because we can't trust what caller's arg points to.
591 		 * Just return the errno.
592 		 */
593 		if (val_sig(cmd) == 0) {
594 			EL(ha, "failed, signature\n");
595 			rval = EINVAL;
596 		} else if (cmd->Version > EXT_VERSION) {
597 			EL(ha, "failed, version\n");
598 			rval = EINVAL;
599 		}
600 	}
601 
602 	if (rval == 0) {
603 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
604 		*cmd_struct = cmd;
605 		cmd->Status = EXT_STATUS_OK;
606 		cmd->DetailStatus = 0;
607 	} else {
608 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
609 	}
610 
611 	return (rval);
612 }
613 
614 /*
615  * ql_validate_signature
616  *	Validate the signature string for an external ioctl call.
617  *
618  * Input:
619  *	sg:	Pointer to EXT_IOCTL signature to validate.
620  *
621  * Returns:
622  *	B_TRUE:		Signature is valid.
623  *	B_FALSE:	Signature is NOT valid.
624  *
625  * Context:
626  *	Kernel context.
627  */
628 static boolean_t
629 ql_validate_signature(EXT_IOCTL *cmd_struct)
630 {
631 	/*
632 	 * Check signature.
633 	 *
634 	 * If signature is not valid then neither is the rest of
635 	 * the structure (e.g., can't trust it), so don't attempt
636 	 * to return any error status other than the errno.
637 	 */
638 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
639 		QL_PRINT_2(CE_CONT, "failed,\n");
640 		return (B_FALSE);
641 	}
642 
643 	return (B_TRUE);
644 }
645 
646 /*
647  * ql_sdm_return
648  *	Copies return data/status to application land for
649  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
650  *
651  * Input:
652  *	ha:		adapter state pointer.
653  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
654  *	ioctl_code:	ioctl function to perform
655  *	arg:		EXT_IOCTL cmd data in application land.
656  *	mode:		flags
657  *
658  * Returns:
659  *	0:	success
660  *	EFAULT:	Copy out error.
661  *
662  * Context:
663  *	Kernel context.
664  */
665 /* ARGSUSED */
666 static int
667 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
668 {
669 	int	rval = 0;
670 
671 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
672 
673 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
674 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
675 	    mode);
676 
677 	rval |= ddi_copyout((void *)&cmd->Status,
678 	    (void *)&(((EXT_IOCTL*)arg)->Status),
679 	    sizeof (cmd->Status), mode);
680 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
681 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
682 	    sizeof (cmd->DetailStatus), mode);
683 
684 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
685 
686 	if (rval != 0) {
687 		/* Some copyout operation failed */
688 		EL(ha, "failed, ddi_copyout\n");
689 		return (EFAULT);
690 	}
691 
692 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
693 
694 	return (0);
695 }
696 
697 /*
698  * ql_query
699  *	Performs all EXT_CC_QUERY functions.
700  *
701  * Input:
702  *	ha:	adapter state pointer.
703  *	cmd:	Local EXT_IOCTL cmd struct pointer.
704  *	mode:	flags.
705  *
706  * Returns:
707  *	None, request status indicated in cmd->Status.
708  *
709  * Context:
710  *	Kernel context.
711  */
712 static void
713 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
714 {
715 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
716 	    cmd->SubCode);
717 
718 	/* case off on command subcode */
719 	switch (cmd->SubCode) {
720 	case EXT_SC_QUERY_HBA_NODE:
721 		ql_qry_hba_node(ha, cmd, mode);
722 		break;
723 	case EXT_SC_QUERY_HBA_PORT:
724 		ql_qry_hba_port(ha, cmd, mode);
725 		break;
726 	case EXT_SC_QUERY_DISC_PORT:
727 		ql_qry_disc_port(ha, cmd, mode);
728 		break;
729 	case EXT_SC_QUERY_DISC_TGT:
730 		ql_qry_disc_tgt(ha, cmd, mode);
731 		break;
732 	case EXT_SC_QUERY_DRIVER:
733 		ql_qry_driver(ha, cmd, mode);
734 		break;
735 	case EXT_SC_QUERY_FW:
736 		ql_qry_fw(ha, cmd, mode);
737 		break;
738 	case EXT_SC_QUERY_CHIP:
739 		ql_qry_chip(ha, cmd, mode);
740 		break;
741 	case EXT_SC_QUERY_CNA_PORT:
742 		ql_qry_cna_port(ha, cmd, mode);
743 		break;
744 	case EXT_SC_QUERY_ADAPTER_VERSIONS:
745 		ql_qry_adapter_versions(ha, cmd, mode);
746 		break;
747 	case EXT_SC_QUERY_DISC_LUN:
748 	default:
749 		/* function not supported. */
750 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
751 		EL(ha, "failed, Unsupported Subcode=%xh\n",
752 		    cmd->SubCode);
753 		break;
754 	}
755 
756 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
757 }
758 
759 /*
760  * ql_qry_hba_node
761  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
762  *
763  * Input:
764  *	ha:	adapter state pointer.
765  *	cmd:	EXT_IOCTL cmd struct pointer.
766  *	mode:	flags.
767  *
768  * Returns:
769  *	None, request status indicated in cmd->Status.
770  *
771  * Context:
772  *	Kernel context.
773  */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;
	ql_mbx_data_t	mr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must be large enough for a full EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Serial number is taken from the low 3 bytes of the WWNN. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		/*
		 * Append the FPGA revision ("d.d", up to 4 chars plus a
		 * separating NUL) after the driver version string; skip it
		 * if it would overflow the fixed-size field.
		 */
		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			/*
			 * NOTE(review): tmpptr is the address of an array
			 * element and can never be NULL, so the check below
			 * is effectively dead code.
			 */
			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}
	/* Firmware version comes back in mailbox registers 1-3. */
	(void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);

	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    mr.mb[1], mr.mb[2], mr.mb[3]);

	/* Pre-24xx chips: append the firmware attribute suffix (mb[6]). */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		switch (mr.mb[6]) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			/*
			 * NOTE(review): FWATTRIB_FPX appends " FLX"; this
			 * looks like it may have been intended as " FPX" —
			 * confirm against the firmware attribute naming
			 * before changing.
			 */
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
			break;
		}
	}

	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate and force NUL termination. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		/* Property buffer was allocated by ddi_getlongprop(). */
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	/* Copy the completed node structure out to the caller. */
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
890 
891 /*
892  * ql_qry_hba_port
893  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
894  *
895  * Input:
896  *	ha:	adapter state pointer.
897  *	cmd:	EXT_IOCTL cmd struct pointer.
898  *	mode:	flags.
899  *
900  * Returns:
901  *	None, request status indicated in cmd->Status.
902  *
903  * Context:
904  *	Kernel context.
905  */
906 static void
907 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
908 {
909 	ql_link_t	*link;
910 	ql_tgt_t	*tq;
911 	ql_mbx_data_t	mr;
912 	EXT_HBA_PORT	tmp_port = {0};
913 	int		rval;
914 	uint16_t	port_cnt, tgt_cnt, index;
915 
916 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
917 
918 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
919 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
920 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
921 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
922 		    cmd->ResponseLen);
923 		cmd->ResponseLen = 0;
924 		return;
925 	}
926 
927 	/* fill in the values */
928 
929 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
930 	    EXT_DEF_WWN_NAME_SIZE);
931 	tmp_port.Id[0] = 0;
932 	tmp_port.Id[1] = ha->d_id.b.domain;
933 	tmp_port.Id[2] = ha->d_id.b.area;
934 	tmp_port.Id[3] = ha->d_id.b.al_pa;
935 
936 	/* For now we are initiator only driver */
937 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
938 
939 	if (ha->task_daemon_flags & LOOP_DOWN) {
940 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
941 	} else if (DRIVER_SUSPENDED(ha)) {
942 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
943 	} else {
944 		tmp_port.State = EXT_DEF_HBA_OK;
945 	}
946 
947 	if (ha->flags & POINT_TO_POINT) {
948 		tmp_port.Mode = EXT_DEF_P2P_MODE;
949 	} else {
950 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
951 	}
952 	/*
953 	 * fill in the portspeed values.
954 	 *
955 	 * default to not yet negotiated state
956 	 */
957 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
958 
959 	if (tmp_port.State == EXT_DEF_HBA_OK) {
960 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
961 			mr.mb[1] = 0;
962 			mr.mb[2] = 0;
963 			rval = ql_data_rate(ha, &mr);
964 			if (rval != QL_SUCCESS) {
965 				EL(ha, "failed, data_rate=%xh\n", rval);
966 			} else {
967 				switch (mr.mb[1]) {
968 				case IIDMA_RATE_1GB:
969 					tmp_port.PortSpeed =
970 					    EXT_DEF_PORTSPEED_1GBIT;
971 					break;
972 				case IIDMA_RATE_2GB:
973 					tmp_port.PortSpeed =
974 					    EXT_DEF_PORTSPEED_2GBIT;
975 					break;
976 				case IIDMA_RATE_4GB:
977 					tmp_port.PortSpeed =
978 					    EXT_DEF_PORTSPEED_4GBIT;
979 					break;
980 				case IIDMA_RATE_8GB:
981 					tmp_port.PortSpeed =
982 					    EXT_DEF_PORTSPEED_8GBIT;
983 					break;
984 				case IIDMA_RATE_10GB:
985 					tmp_port.PortSpeed =
986 					    EXT_DEF_PORTSPEED_10GBIT;
987 					break;
988 				default:
989 					tmp_port.PortSpeed =
990 					    EXT_DEF_PORTSPEED_UNKNOWN;
991 					EL(ha, "failed, data rate=%xh\n",
992 					    mr.mb[1]);
993 					break;
994 				}
995 			}
996 		} else {
997 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
998 		}
999 	}
1000 
1001 	/* Report all supported port speeds */
1002 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
1003 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
1004 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
1005 		    EXT_DEF_PORTSPEED_1GBIT);
1006 		/*
1007 		 * Correct supported speeds based on type of
1008 		 * sfp that is present
1009 		 */
1010 		switch (ha->sfp_stat) {
1011 		case 1:
1012 			/* no sfp detected */
1013 			break;
1014 		case 2:
1015 		case 4:
1016 			/* 4GB sfp */
1017 			tmp_port.PortSupportedSpeed &=
1018 			    ~EXT_DEF_PORTSPEED_8GBIT;
1019 			break;
1020 		case 3:
1021 		case 5:
1022 			/* 8GB sfp */
1023 			tmp_port.PortSupportedSpeed &=
1024 			    ~EXT_DEF_PORTSPEED_1GBIT;
1025 			break;
1026 		default:
1027 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1028 			break;
1029 
1030 		}
1031 	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
1032 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1033 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1034 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1035 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1036 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1037 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1038 		    EXT_DEF_PORTSPEED_1GBIT);
1039 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1040 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1041 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1042 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1043 	} else {
1044 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1045 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1046 	}
1047 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1048 	port_cnt = 0;
1049 	tgt_cnt = 0;
1050 
1051 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1052 		for (link = ha->dev[index].first; link != NULL;
1053 		    link = link->next) {
1054 			tq = link->base_address;
1055 
1056 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1057 				continue;
1058 			}
1059 
1060 			port_cnt++;
1061 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1062 				tgt_cnt++;
1063 			}
1064 		}
1065 	}
1066 
1067 	tmp_port.DiscPortCount = port_cnt;
1068 	tmp_port.DiscTargetCount = tgt_cnt;
1069 
1070 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1071 
1072 	rval = ddi_copyout((void *)&tmp_port,
1073 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1074 	    sizeof (EXT_HBA_PORT), mode);
1075 	if (rval != 0) {
1076 		cmd->Status = EXT_STATUS_COPY_ERR;
1077 		cmd->ResponseLen = 0;
1078 		EL(ha, "failed, ddi_copyout\n");
1079 	} else {
1080 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1081 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1082 		    ha->instance, port_cnt, tgt_cnt);
1083 	}
1084 }
1085 
1086 /*
1087  * ql_qry_disc_port
1088  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1089  *
1090  * Input:
1091  *	ha:	adapter state pointer.
1092  *	cmd:	EXT_IOCTL cmd struct pointer.
1093  *	mode:	flags.
1094  *
1095  *	cmd->Instance = Port instance in fcport chain.
1096  *
1097  * Returns:
1098  *	None, request status indicated in cmd->Status.
1099  *
1100  * Context:
1101  *	Kernel context.
1102  */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must be able to hold a full EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk every device-list hash bucket counting valid ports until
	 * the cmd->Instance'th one is reached.  The outer loop condition
	 * (link == NULL) stops the bucket scan once the inner loop broke
	 * out with a match; link doubles as the "found" flag below.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Skip entries without a usable loop ID. */
			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			/* Not the requested instance yet; keep counting. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}

	/* 24-bit FC port identifier: domain/area/AL_PA. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = tq->d_id.b.domain;
	tmp_port.Id[2] = tq->d_id.b.area;
	tmp_port.Id[3] = tq->d_id.b.al_pa;

	/*
	 * Classify the port.  NOTE(review): when neither the initiator
	 * nor the tape flag is set, ql_inq_scan() is issued but its
	 * result is not re-examined here, so a tape device discovered by
	 * that scan will not get EXT_DEF_TAPE_DEV on this call (compare
	 * ql_qry_disc_tgt, which scans first and then tests the flag) —
	 * confirm whether this is intentional.
	 */
	tmp_port.Type = 0;
	if (tq->flags & TQF_INITIATOR_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type |
		    EXT_DEF_INITIATOR_DEV);
	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	} else if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;  /* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	/* Return the completed structure to the application. */
	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1192 
1193 /*
1194  * ql_qry_disc_tgt
1195  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1196  *
1197  * Input:
1198  *	ha:		adapter state pointer.
1199  *	cmd:		EXT_IOCTL cmd struct pointer.
1200  *	mode:		flags.
1201  *
1202  *	cmd->Instance = Port instance in fcport chain.
1203  *
1204  * Returns:
1205  *	None, request status indicated in cmd->Status.
1206  *
1207  * Context:
1208  *	Kernel context.
1209  */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
	    cmd->Instance);

	/* Caller's buffer must be able to hold a full EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	/*
	 * Initiator-only entries are excluded from the target count;
	 * link remains non-NULL after the loops only when the
	 * cmd->Instance'th target was found.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			/* Not the requested instance yet; keep counting. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		EL(ha, "failed, not found target=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}
	/* 24-bit FC port identifier: domain/area/AL_PA. */
	tmp_tgt.Id[0] = 0;
	tmp_tgt.Id[1] = tq->d_id.b.domain;
	tmp_tgt.Id[2] = tq->d_id.b.area;
	tmp_tgt.Id[3] = tq->d_id.b.al_pa;

	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);

	/* Probe the device first so the tape flag below is current. */
	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	}

	tmp_tgt.Type = 0;
	if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */

	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	/* Return the completed structure to the application. */
	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1304 
1305 /*
1306  * ql_qry_fw
1307  *	Performs EXT_SC_QUERY_FW subfunction.
1308  *
1309  * Input:
1310  *	ha:	adapter state pointer.
1311  *	cmd:	EXT_IOCTL cmd struct pointer.
1312  *	mode:	flags.
1313  *
1314  * Returns:
1315  *	None, request status indicated in cmd->Status.
1316  *
1317  * Context:
1318  *	Kernel context.
1319  */
1320 static void
1321 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1322 {
1323 	ql_mbx_data_t	mr;
1324 	EXT_FW		fw_info = {0};
1325 
1326 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1327 
1328 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1329 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1330 		cmd->DetailStatus = sizeof (EXT_FW);
1331 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1332 		    cmd->ResponseLen);
1333 		cmd->ResponseLen = 0;
1334 		return;
1335 	}
1336 
1337 	(void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
1338 
1339 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1340 	    mr.mb[2], mr.mb[2]);
1341 
1342 	fw_info.Attrib = mr.mb[6];
1343 
1344 	if (ddi_copyout((void *)&fw_info,
1345 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1346 	    sizeof (EXT_FW), mode) != 0) {
1347 		cmd->Status = EXT_STATUS_COPY_ERR;
1348 		cmd->ResponseLen = 0;
1349 		EL(ha, "failed, ddi_copyout\n");
1350 		return;
1351 	} else {
1352 		cmd->ResponseLen = sizeof (EXT_FW);
1353 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1354 	}
1355 }
1356 
1357 /*
1358  * ql_qry_chip
1359  *	Performs EXT_SC_QUERY_CHIP subfunction.
1360  *
1361  * Input:
1362  *	ha:	adapter state pointer.
1363  *	cmd:	EXT_IOCTL cmd struct pointer.
1364  *	mode:	flags.
1365  *
1366  * Returns:
1367  *	None, request status indicated in cmd->Status.
1368  *
1369  * Context:
1370  *	Kernel context.
1371  */
1372 static void
1373 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1374 {
1375 	EXT_CHIP	chip = {0};
1376 
1377 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1378 
1379 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1380 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1381 		cmd->DetailStatus = sizeof (EXT_CHIP);
1382 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1383 		    cmd->ResponseLen);
1384 		cmd->ResponseLen = 0;
1385 		return;
1386 	}
1387 
1388 	chip.VendorId = ha->ven_id;
1389 	chip.DeviceId = ha->device_id;
1390 	chip.SubVendorId = ha->subven_id;
1391 	chip.SubSystemId = ha->subsys_id;
1392 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1393 	chip.IoAddrLen = 0x100;
1394 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1395 	chip.MemAddrLen = 0x100;
1396 	chip.ChipRevID = ha->rev_id;
1397 	if (ha->flags & FUNCTION_1) {
1398 		chip.FuncNo = 1;
1399 	}
1400 
1401 	if (ddi_copyout((void *)&chip,
1402 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1403 	    sizeof (EXT_CHIP), mode) != 0) {
1404 		cmd->Status = EXT_STATUS_COPY_ERR;
1405 		cmd->ResponseLen = 0;
1406 		EL(ha, "failed, ddi_copyout\n");
1407 	} else {
1408 		cmd->ResponseLen = sizeof (EXT_CHIP);
1409 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1410 	}
1411 }
1412 
1413 /*
1414  * ql_qry_driver
1415  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1416  *
1417  * Input:
1418  *	ha:	adapter state pointer.
1419  *	cmd:	EXT_IOCTL cmd struct pointer.
1420  *	mode:	flags.
1421  *
1422  * Returns:
1423  *	None, request status indicated in cmd->Status.
1424  *
1425  * Context:
1426  *	Kernel context.
1427  */
1428 static void
1429 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1430 {
1431 	EXT_DRIVER	qd = {0};
1432 
1433 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1434 
1435 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1436 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1437 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1438 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1439 		    cmd->ResponseLen);
1440 		cmd->ResponseLen = 0;
1441 		return;
1442 	}
1443 
1444 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1445 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1446 	qd.TargetsPerBus = (uint16_t)
1447 	    (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1448 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1449 	qd.LunsPerTarget = 2030;
1450 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1451 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1452 
1453 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1454 	    sizeof (EXT_DRIVER), mode) != 0) {
1455 		cmd->Status = EXT_STATUS_COPY_ERR;
1456 		cmd->ResponseLen = 0;
1457 		EL(ha, "failed, ddi_copyout\n");
1458 	} else {
1459 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1460 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1461 	}
1462 }
1463 
1464 /*
1465  * ql_fcct
1466  *	IOCTL management server FC-CT passthrough.
1467  *
1468  * Input:
1469  *	ha:	adapter state pointer.
1470  *	cmd:	User space CT arguments pointer.
1471  *	mode:	flags.
1472  *
1473  * Returns:
1474  *	None, request status indicated in cmd->Status.
1475  *
1476  * Context:
1477  *	Kernel context.
1478  */
1479 static void
1480 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1481 {
1482 	ql_mbx_iocb_t		*pkt;
1483 	ql_mbx_data_t		mr;
1484 	dma_mem_t		*dma_mem;
1485 	caddr_t			pld;
1486 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1487 	int			rval;
1488 	ql_ct_iu_preamble_t	*ct;
1489 	ql_xioctl_t		*xp = ha->xioctl;
1490 	ql_tgt_t		tq;
1491 	uint16_t		comp_status, loop_id;
1492 
1493 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1494 
1495 	/* Get CT argument structure. */
1496 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1497 		EL(ha, "failed, No switch\n");
1498 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1499 		cmd->ResponseLen = 0;
1500 		return;
1501 	}
1502 
1503 	if (DRIVER_SUSPENDED(ha)) {
1504 		EL(ha, "failed, LOOP_NOT_READY\n");
1505 		cmd->Status = EXT_STATUS_BUSY;
1506 		cmd->ResponseLen = 0;
1507 		return;
1508 	}
1509 
1510 	/* Login management server device. */
1511 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1512 		tq.d_id.b.al_pa = 0xfa;
1513 		tq.d_id.b.area = 0xff;
1514 		tq.d_id.b.domain = 0xff;
1515 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1516 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1517 		    MANAGEMENT_SERVER_LOOP_ID);
1518 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1519 		if (rval != QL_SUCCESS) {
1520 			EL(ha, "failed, server login\n");
1521 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1522 			cmd->ResponseLen = 0;
1523 			return;
1524 		} else {
1525 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1526 		}
1527 	}
1528 
1529 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1530 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1531 
1532 	/* Allocate a DMA Memory Descriptor */
1533 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1534 	if (dma_mem == NULL) {
1535 		EL(ha, "failed, kmem_zalloc\n");
1536 		cmd->Status = EXT_STATUS_NO_MEMORY;
1537 		cmd->ResponseLen = 0;
1538 		return;
1539 	}
1540 	/* Determine maximum buffer size. */
1541 	if (cmd->RequestLen < cmd->ResponseLen) {
1542 		pld_byte_cnt = cmd->ResponseLen;
1543 	} else {
1544 		pld_byte_cnt = cmd->RequestLen;
1545 	}
1546 
1547 	/* Allocate command block. */
1548 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1549 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1550 	if (pkt == NULL) {
1551 		EL(ha, "failed, kmem_zalloc\n");
1552 		cmd->Status = EXT_STATUS_NO_MEMORY;
1553 		cmd->ResponseLen = 0;
1554 		return;
1555 	}
1556 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1557 
1558 	/* Get command payload data. */
1559 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1560 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1561 		EL(ha, "failed, get_buffer_data\n");
1562 		kmem_free(pkt, pkt_size);
1563 		cmd->Status = EXT_STATUS_COPY_ERR;
1564 		cmd->ResponseLen = 0;
1565 		return;
1566 	}
1567 
1568 	/* Get DMA memory for the IOCB */
1569 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1570 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1571 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1572 		    "alloc failed", QL_NAME, ha->instance);
1573 		kmem_free(pkt, pkt_size);
1574 		kmem_free(dma_mem, sizeof (dma_mem_t));
1575 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1576 		cmd->ResponseLen = 0;
1577 		return;
1578 	}
1579 
1580 	/* Copy out going payload data to IOCB DMA buffer. */
1581 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1582 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1583 
1584 	/* Sync IOCB DMA buffer. */
1585 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1586 	    DDI_DMA_SYNC_FORDEV);
1587 
1588 	/*
1589 	 * Setup IOCB
1590 	 */
1591 	ct = (ql_ct_iu_preamble_t *)pld;
1592 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1593 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1594 		pkt->ms24.entry_count = 1;
1595 
1596 		/* Set loop ID */
1597 		pkt->ms24.n_port_hdl = (uint16_t)
1598 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1599 		    LE_16(SNS_24XX_HDL) :
1600 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1601 
1602 		/* Set ISP command timeout. */
1603 		pkt->ms24.timeout = LE_16(120);
1604 
1605 		/* Set cmd/response data segment counts. */
1606 		pkt->ms24.cmd_dseg_count = LE_16(1);
1607 		pkt->ms24.resp_dseg_count = LE_16(1);
1608 
1609 		/* Load ct cmd byte count. */
1610 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1611 
1612 		/* Load ct rsp byte count. */
1613 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1614 
1615 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1616 
1617 		/* Load MS command entry data segments. */
1618 		*long_ptr++ = (uint32_t)
1619 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1620 		*long_ptr++ = (uint32_t)
1621 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1622 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1623 
1624 		/* Load MS response entry data segments. */
1625 		*long_ptr++ = (uint32_t)
1626 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1627 		*long_ptr++ = (uint32_t)
1628 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1629 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1630 
1631 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1632 		    sizeof (ql_mbx_iocb_t));
1633 
1634 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1635 		if (comp_status == CS_DATA_UNDERRUN) {
1636 			if ((BE_16(ct->max_residual_size)) == 0) {
1637 				comp_status = CS_COMPLETE;
1638 			}
1639 		}
1640 
1641 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1642 		    0) {
1643 			EL(ha, "failed, I/O timeout or "
1644 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1645 			    pkt->sts24.entry_status,
1646 			    pkt->sts24.scsi_status_l, rval);
1647 			kmem_free(pkt, pkt_size);
1648 			ql_free_dma_resource(ha, dma_mem);
1649 			kmem_free(dma_mem, sizeof (dma_mem_t));
1650 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1651 			cmd->ResponseLen = 0;
1652 			return;
1653 		}
1654 	} else {
1655 		pkt->ms.entry_type = MS_TYPE;
1656 		pkt->ms.entry_count = 1;
1657 
1658 		/* Set loop ID */
1659 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1660 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1661 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1662 			pkt->ms.loop_id_l = LSB(loop_id);
1663 			pkt->ms.loop_id_h = MSB(loop_id);
1664 		} else {
1665 			pkt->ms.loop_id_h = LSB(loop_id);
1666 		}
1667 
1668 		/* Set ISP command timeout. */
1669 		pkt->ms.timeout = LE_16(120);
1670 
1671 		/* Set data segment counts. */
1672 		pkt->ms.cmd_dseg_count_l = 1;
1673 		pkt->ms.total_dseg_count = LE_16(2);
1674 
1675 		/* Response total byte count. */
1676 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1677 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1678 
1679 		/* Command total byte count. */
1680 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1681 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1682 
1683 		/* Load command/response data segments. */
1684 		pkt->ms.dseg_0_address[0] = (uint32_t)
1685 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1686 		pkt->ms.dseg_0_address[1] = (uint32_t)
1687 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1688 		pkt->ms.dseg_1_address[0] = (uint32_t)
1689 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1690 		pkt->ms.dseg_1_address[1] = (uint32_t)
1691 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1692 
1693 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1694 		    sizeof (ql_mbx_iocb_t));
1695 
1696 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1697 		if (comp_status == CS_DATA_UNDERRUN) {
1698 			if ((BE_16(ct->max_residual_size)) == 0) {
1699 				comp_status = CS_COMPLETE;
1700 			}
1701 		}
1702 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1703 			EL(ha, "failed, I/O timeout or "
1704 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1705 			kmem_free(pkt, pkt_size);
1706 			ql_free_dma_resource(ha, dma_mem);
1707 			kmem_free(dma_mem, sizeof (dma_mem_t));
1708 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1709 			cmd->ResponseLen = 0;
1710 			return;
1711 		}
1712 	}
1713 
1714 	/* Sync in coming DMA buffer. */
1715 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1716 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1717 	/* Copy in coming DMA data. */
1718 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1719 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1720 	    DDI_DEV_AUTOINCR);
1721 
1722 	/* Copy response payload from DMA buffer to application. */
1723 	if (cmd->ResponseLen != 0) {
1724 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1725 		    cmd->ResponseLen);
1726 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1727 
1728 		/* Send response payload. */
1729 		if (ql_send_buffer_data(pld,
1730 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1731 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1732 			EL(ha, "failed, send_buffer_data\n");
1733 			cmd->Status = EXT_STATUS_COPY_ERR;
1734 			cmd->ResponseLen = 0;
1735 		}
1736 	}
1737 
1738 	kmem_free(pkt, pkt_size);
1739 	ql_free_dma_resource(ha, dma_mem);
1740 	kmem_free(dma_mem, sizeof (dma_mem_t));
1741 
1742 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1743 }
1744 
1745 /*
1746  * ql_aen_reg
1747  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1748  *
1749  * Input:
1750  *	ha:	adapter state pointer.
1751  *	cmd:	EXT_IOCTL cmd struct pointer.
1752  *	mode:	flags.
1753  *
1754  * Returns:
1755  *	None, request status indicated in cmd->Status.
1756  *
1757  * Context:
1758  *	Kernel context.
1759  */
1760 static void
1761 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1762 {
1763 	EXT_REG_AEN	reg_struct;
1764 	int		rval = 0;
1765 	ql_xioctl_t	*xp = ha->xioctl;
1766 
1767 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1768 
1769 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1770 	    cmd->RequestLen, mode);
1771 
1772 	if (rval == 0) {
1773 		if (reg_struct.Enable) {
1774 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1775 		} else {
1776 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1777 			/* Empty the queue. */
1778 			INTR_LOCK(ha);
1779 			xp->aen_q_head = 0;
1780 			xp->aen_q_tail = 0;
1781 			INTR_UNLOCK(ha);
1782 		}
1783 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1784 	} else {
1785 		cmd->Status = EXT_STATUS_COPY_ERR;
1786 		EL(ha, "failed, ddi_copyin\n");
1787 	}
1788 }
1789 
1790 /*
1791  * ql_aen_get
1792  *	IOCTL management server Asynchronous Event Record Transfer.
1793  *
1794  * Input:
1795  *	ha:	adapter state pointer.
1796  *	cmd:	EXT_IOCTL cmd struct pointer.
1797  *	mode:	flags.
1798  *
1799  * Returns:
1800  *	None, request status indicated in cmd->Status.
1801  *
1802  * Context:
1803  *	Kernel context.
1804  */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* Caller must be able to accept the whole queue in one call. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/*
	 * Drain under INTR_LOCK so the interrupt-context producer
	 * (ql_enqueue_aen) cannot modify head/tail while we copy.
	 */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		/* A zero AsyncEventCode marks an unused slot. */
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		/* Ring buffer: wrap index back to slot 0. */
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			i = 0;
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1876 
1877 /*
1878  * ql_enqueue_aen
1879  *
1880  * Input:
1881  *	ha:		adapter state pointer.
1882  *	event_code:	async event code of the event to add to queue.
1883  *	payload:	event payload for the queue.
1884  *	INTR_LOCK must be already obtained.
1885  *
1886  * Context:
1887  *	Interrupt or Kernel context, no mailbox commands allowed.
1888  */
1889 void
1890 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1891 {
1892 	uint8_t			new_entry;	/* index to current entry */
1893 	uint16_t		*mbx;
1894 	EXT_ASYNC_EVENT		*aen_queue;
1895 	ql_xioctl_t		*xp = ha->xioctl;
1896 
1897 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1898 	    event_code);
1899 
1900 	if (xp == NULL) {
1901 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1902 		return;
1903 	}
1904 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1905 
1906 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1907 		/* Need to change queue pointers to make room. */
1908 
1909 		/* Increment tail for adding new entry. */
1910 		xp->aen_q_tail++;
1911 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1912 			xp->aen_q_tail = 0;
1913 		}
1914 		if (xp->aen_q_head == xp->aen_q_tail) {
1915 			/*
1916 			 * We're overwriting the oldest entry, so need to
1917 			 * update the head pointer.
1918 			 */
1919 			xp->aen_q_head++;
1920 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1921 				xp->aen_q_head = 0;
1922 			}
1923 		}
1924 	}
1925 
1926 	new_entry = xp->aen_q_tail;
1927 	aen_queue[new_entry].AsyncEventCode = event_code;
1928 
1929 	/* Update payload */
1930 	if (payload != NULL) {
1931 		switch (event_code) {
1932 		case MBA_LIP_OCCURRED:
1933 		case MBA_LOOP_UP:
1934 		case MBA_LOOP_DOWN:
1935 		case MBA_LIP_F8:
1936 		case MBA_LIP_RESET:
1937 		case MBA_PORT_UPDATE:
1938 			break;
1939 		case MBA_RSCN_UPDATE:
1940 			mbx = (uint16_t *)payload;
1941 			/* al_pa */
1942 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1943 			    LSB(mbx[2]);
1944 			/* area */
1945 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1946 			    MSB(mbx[2]);
1947 			/* domain */
1948 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1949 			    LSB(mbx[1]);
1950 			/* save in big endian */
1951 			BIG_ENDIAN_24(&aen_queue[new_entry].
1952 			    Payload.RSCN.RSCNInfo[0]);
1953 
1954 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1955 			    MSB(mbx[1]);
1956 
1957 			break;
1958 		default:
1959 			/* Not supported */
1960 			EL(ha, "failed, event code not supported=%xh\n",
1961 			    event_code);
1962 			aen_queue[new_entry].AsyncEventCode = 0;
1963 			break;
1964 		}
1965 	}
1966 
1967 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1968 }
1969 
1970 /*
1971  * ql_scsi_passthru
1972  *	IOCTL SCSI passthrough.
1973  *
1974  * Input:
1975  *	ha:	adapter state pointer.
1976  *	cmd:	User space SCSI command pointer.
1977  *	mode:	flags.
1978  *
1979  * Returns:
1980  *	None, request status indicated in cmd->Status.
1981  *
1982  * Context:
1983  *	Kernel context.
1984  */
1985 static void
1986 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1987 {
1988 	ql_mbx_iocb_t		*pkt;
1989 	ql_mbx_data_t		mr;
1990 	dma_mem_t		*dma_mem;
1991 	caddr_t			pld;
1992 	uint32_t		pkt_size, pld_size;
1993 	uint16_t		qlnt, retries, cnt, cnt2;
1994 	uint8_t			*name;
1995 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1996 	EXT_SCSI_PASSTHRU	*usp_req;
1997 	int			rval;
1998 	union _passthru {
1999 		EXT_SCSI_PASSTHRU	sp_cmd;
2000 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
2001 	} pt_req;		/* Passthru request */
2002 	uint32_t		status, sense_sz = 0;
2003 	ql_tgt_t		*tq = NULL;
2004 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
2005 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
2006 
2007 	/* SCSI request struct for SCSI passthrough IOs. */
2008 	struct {
2009 		uint16_t	lun;
2010 		uint16_t	sense_length;	/* Sense buffer size */
2011 		size_t		resid;		/* Residual */
2012 		uint8_t		*cdbp;		/* Requestor's CDB */
2013 		uint8_t		*u_sense;	/* Requestor's sense buffer */
2014 		uint8_t		cdb_len;	/* Requestor's CDB length */
2015 		uint8_t		direction;
2016 	} scsi_req;
2017 
2018 	struct {
2019 		uint8_t		*rsp_info;
2020 		uint8_t		*req_sense_data;
2021 		uint32_t	residual_length;
2022 		uint32_t	rsp_info_length;
2023 		uint32_t	req_sense_length;
2024 		uint16_t	comp_status;
2025 		uint8_t		state_flags_l;
2026 		uint8_t		state_flags_h;
2027 		uint8_t		scsi_status_l;
2028 		uint8_t		scsi_status_h;
2029 	} sts;
2030 
2031 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2032 
2033 	/* Verify Sub Code and set cnt to needed request size. */
2034 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2035 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2036 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2037 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2038 	} else {
2039 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2040 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2041 		cmd->ResponseLen = 0;
2042 		return;
2043 	}
2044 
2045 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2046 	if (dma_mem == NULL) {
2047 		EL(ha, "failed, kmem_zalloc\n");
2048 		cmd->Status = EXT_STATUS_NO_MEMORY;
2049 		cmd->ResponseLen = 0;
2050 		return;
2051 	}
2052 	/*  Verify the size of and copy in the passthru request structure. */
2053 	if (cmd->RequestLen != pld_size) {
2054 		/* Return error */
2055 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2056 		    cmd->RequestLen, pld_size);
2057 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2058 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2059 		cmd->ResponseLen = 0;
2060 		return;
2061 	}
2062 
2063 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2064 	    pld_size, mode) != 0) {
2065 		EL(ha, "failed, ddi_copyin\n");
2066 		cmd->Status = EXT_STATUS_COPY_ERR;
2067 		cmd->ResponseLen = 0;
2068 		return;
2069 	}
2070 
2071 	/*
2072 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2073 	 * request data structure.
2074 	 */
2075 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2076 		scsi_req.lun = sp_req->TargetAddr.Lun;
2077 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2078 		scsi_req.cdbp = &sp_req->Cdb[0];
2079 		scsi_req.cdb_len = sp_req->CdbLength;
2080 		scsi_req.direction = sp_req->Direction;
2081 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2082 		scsi_req.u_sense = &usp_req->SenseData[0];
2083 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2084 
2085 		qlnt = QLNT_PORT;
2086 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2087 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2088 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2089 		tq = ql_find_port(ha, name, qlnt);
2090 	} else {
2091 		/*
2092 		 * Must be FC PASSTHRU, verified above.
2093 		 */
2094 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2095 			qlnt = QLNT_PORT;
2096 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2097 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2098 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2099 			    ha->instance, cmd->SubCode, name[0], name[1],
2100 			    name[2], name[3], name[4], name[5], name[6],
2101 			    name[7]);
2102 			tq = ql_find_port(ha, name, qlnt);
2103 		} else if (fc_req->FCScsiAddr.DestType ==
2104 		    EXT_DEF_DESTTYPE_WWNN) {
2105 			qlnt = QLNT_NODE;
2106 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2107 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2108 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2109 			    ha->instance, cmd->SubCode, name[0], name[1],
2110 			    name[2], name[3], name[4], name[5], name[6],
2111 			    name[7]);
2112 			tq = ql_find_port(ha, name, qlnt);
2113 		} else if (fc_req->FCScsiAddr.DestType ==
2114 		    EXT_DEF_DESTTYPE_PORTID) {
2115 			qlnt = QLNT_PID;
2116 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2117 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2118 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2119 			    name[0], name[1], name[2]);
2120 			tq = ql_find_port(ha, name, qlnt);
2121 		} else {
2122 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2123 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2124 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2125 			cmd->ResponseLen = 0;
2126 			return;
2127 		}
2128 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2129 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2130 		scsi_req.cdbp = &sp_req->Cdb[0];
2131 		scsi_req.cdb_len = sp_req->CdbLength;
2132 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2133 		scsi_req.u_sense = &ufc_req->SenseData[0];
2134 		scsi_req.direction = fc_req->Direction;
2135 	}
2136 
2137 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2138 		EL(ha, "failed, fc_port not found\n");
2139 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2140 		cmd->ResponseLen = 0;
2141 		return;
2142 	}
2143 
2144 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2145 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2146 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2147 		cmd->ResponseLen = 0;
2148 		return;
2149 	}
2150 
2151 	/* Allocate command block. */
2152 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2153 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2154 	    cmd->ResponseLen) {
2155 		pld_size = cmd->ResponseLen;
2156 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2157 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2158 		if (pkt == NULL) {
2159 			EL(ha, "failed, kmem_zalloc\n");
2160 			cmd->Status = EXT_STATUS_NO_MEMORY;
2161 			cmd->ResponseLen = 0;
2162 			return;
2163 		}
2164 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2165 
2166 		/* Get DMA memory for the IOCB */
2167 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2168 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2169 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2170 			    "alloc failed", QL_NAME, ha->instance);
2171 			kmem_free(pkt, pkt_size);
2172 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2173 			cmd->ResponseLen = 0;
2174 			return;
2175 		}
2176 
2177 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2178 			scsi_req.direction = (uint8_t)
2179 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2180 			    CF_RD : CF_DATA_IN | CF_STAG);
2181 		} else {
2182 			scsi_req.direction = (uint8_t)
2183 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2184 			    CF_WR : CF_DATA_OUT | CF_STAG);
2185 			cmd->ResponseLen = 0;
2186 
2187 			/* Get command payload. */
2188 			if (ql_get_buffer_data(
2189 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2190 			    pld, pld_size, mode) != pld_size) {
2191 				EL(ha, "failed, get_buffer_data\n");
2192 				cmd->Status = EXT_STATUS_COPY_ERR;
2193 
2194 				kmem_free(pkt, pkt_size);
2195 				ql_free_dma_resource(ha, dma_mem);
2196 				kmem_free(dma_mem, sizeof (dma_mem_t));
2197 				return;
2198 			}
2199 
2200 			/* Copy out going data to DMA buffer. */
2201 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2202 			    (uint8_t *)dma_mem->bp, pld_size,
2203 			    DDI_DEV_AUTOINCR);
2204 
2205 			/* Sync DMA buffer. */
2206 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2207 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2208 		}
2209 	} else {
2210 		scsi_req.direction = (uint8_t)
2211 		    (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2212 		cmd->ResponseLen = 0;
2213 
2214 		pkt_size = sizeof (ql_mbx_iocb_t);
2215 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2216 		if (pkt == NULL) {
2217 			EL(ha, "failed, kmem_zalloc-2\n");
2218 			cmd->Status = EXT_STATUS_NO_MEMORY;
2219 			return;
2220 		}
2221 		pld = NULL;
2222 		pld_size = 0;
2223 	}
2224 
2225 	/* retries = ha->port_down_retry_count; */
2226 	retries = 1;
2227 	cmd->Status = EXT_STATUS_OK;
2228 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2229 
2230 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2231 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2232 
2233 	do {
2234 		if (DRIVER_SUSPENDED(ha)) {
2235 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2236 			break;
2237 		}
2238 
2239 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2240 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2241 			pkt->cmd24.entry_count = 1;
2242 
2243 			/* Set LUN number */
2244 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2245 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2246 
2247 			/* Set N_port handle */
2248 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2249 
2250 			/* Set VP Index */
2251 			pkt->cmd24.vp_index = ha->vp_index;
2252 
2253 			/* Set target ID */
2254 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2255 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2256 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2257 
2258 			/* Set ISP command timeout. */
2259 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2260 
2261 			/* Load SCSI CDB */
2262 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2263 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2264 			    DDI_DEV_AUTOINCR);
2265 			for (cnt = 0; cnt < MAX_CMDSZ;
2266 			    cnt = (uint16_t)(cnt + 4)) {
2267 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2268 				    + cnt, 4);
2269 			}
2270 
2271 			/* Set tag queue control flags */
2272 			pkt->cmd24.task = TA_STAG;
2273 
2274 			if (pld_size) {
2275 				/* Set transfer direction. */
2276 				pkt->cmd24.control_flags = scsi_req.direction;
2277 
2278 				/* Set data segment count. */
2279 				pkt->cmd24.dseg_count = LE_16(1);
2280 
2281 				/* Load total byte count. */
2282 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2283 
2284 				/* Load data descriptor. */
2285 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2286 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2287 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2288 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2289 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2290 			}
2291 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2292 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2293 			pkt->cmd3.entry_count = 1;
2294 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2295 				pkt->cmd3.target_l = LSB(tq->loop_id);
2296 				pkt->cmd3.target_h = MSB(tq->loop_id);
2297 			} else {
2298 				pkt->cmd3.target_h = LSB(tq->loop_id);
2299 			}
2300 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2301 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2302 			pkt->cmd3.control_flags_l = scsi_req.direction;
2303 			pkt->cmd3.timeout = LE_16(15);
2304 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2305 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2306 			}
2307 			if (pld_size) {
2308 				pkt->cmd3.dseg_count = LE_16(1);
2309 				pkt->cmd3.byte_count = LE_32(pld_size);
2310 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2311 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2312 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2313 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2314 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2315 			}
2316 		} else {
2317 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2318 			pkt->cmd.entry_count = 1;
2319 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2320 				pkt->cmd.target_l = LSB(tq->loop_id);
2321 				pkt->cmd.target_h = MSB(tq->loop_id);
2322 			} else {
2323 				pkt->cmd.target_h = LSB(tq->loop_id);
2324 			}
2325 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2326 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2327 			pkt->cmd.control_flags_l = scsi_req.direction;
2328 			pkt->cmd.timeout = LE_16(15);
2329 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2330 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2331 			}
2332 			if (pld_size) {
2333 				pkt->cmd.dseg_count = LE_16(1);
2334 				pkt->cmd.byte_count = LE_32(pld_size);
2335 				pkt->cmd.dseg_0_address = (uint32_t)
2336 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2337 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2338 			}
2339 		}
2340 		/* Go issue command and wait for completion. */
2341 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2342 		QL_DUMP_9(pkt, 8, pkt_size);
2343 
2344 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2345 
2346 		if (pld_size) {
2347 			/* Sync in coming DMA buffer. */
2348 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2349 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2350 			/* Copy in coming DMA data. */
2351 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2352 			    (uint8_t *)dma_mem->bp, pld_size,
2353 			    DDI_DEV_AUTOINCR);
2354 		}
2355 
2356 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2357 			pkt->sts24.entry_status = (uint8_t)
2358 			    (pkt->sts24.entry_status & 0x3c);
2359 		} else {
2360 			pkt->sts.entry_status = (uint8_t)
2361 			    (pkt->sts.entry_status & 0x7e);
2362 		}
2363 
2364 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2365 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2366 			    pkt->sts.entry_status, tq->d_id.b24);
2367 			status = QL_FUNCTION_PARAMETER_ERROR;
2368 		}
2369 
2370 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2371 		    LE_16(pkt->sts24.comp_status) :
2372 		    LE_16(pkt->sts.comp_status));
2373 
2374 		/*
2375 		 * We have verified about all the request that can be so far.
2376 		 * Now we need to start verification of our ability to
2377 		 * actually issue the CDB.
2378 		 */
2379 		if (DRIVER_SUSPENDED(ha)) {
2380 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2381 			break;
2382 		} else if (status == QL_SUCCESS &&
2383 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2384 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2385 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2386 			if (tq->flags & TQF_FABRIC_DEVICE) {
2387 				rval = ql_login_fport(ha, tq, tq->loop_id,
2388 				    LFF_NO_PLOGI, &mr);
2389 				if (rval != QL_SUCCESS) {
2390 					EL(ha, "failed, login_fport=%xh, "
2391 					    "d_id=%xh\n", rval, tq->d_id.b24);
2392 				}
2393 			} else {
2394 				rval = ql_login_lport(ha, tq, tq->loop_id,
2395 				    LLF_NONE);
2396 				if (rval != QL_SUCCESS) {
2397 					EL(ha, "failed, login_lport=%xh, "
2398 					    "d_id=%xh\n", rval, tq->d_id.b24);
2399 				}
2400 			}
2401 		} else {
2402 			break;
2403 		}
2404 
2405 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2406 
2407 	} while (retries--);
2408 
2409 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2410 		/* Cannot issue command now, maybe later */
2411 		EL(ha, "failed, suspended\n");
2412 		kmem_free(pkt, pkt_size);
2413 		ql_free_dma_resource(ha, dma_mem);
2414 		kmem_free(dma_mem, sizeof (dma_mem_t));
2415 		cmd->Status = EXT_STATUS_SUSPENDED;
2416 		cmd->ResponseLen = 0;
2417 		return;
2418 	}
2419 
2420 	if (status != QL_SUCCESS) {
2421 		/* Command error */
2422 		EL(ha, "failed, I/O\n");
2423 		kmem_free(pkt, pkt_size);
2424 		ql_free_dma_resource(ha, dma_mem);
2425 		kmem_free(dma_mem, sizeof (dma_mem_t));
2426 		cmd->Status = EXT_STATUS_ERR;
2427 		cmd->DetailStatus = status;
2428 		cmd->ResponseLen = 0;
2429 		return;
2430 	}
2431 
2432 	/* Setup status. */
2433 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
2434 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2435 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2436 
2437 		/* Setup residuals. */
2438 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2439 
2440 		/* Setup state flags. */
2441 		sts.state_flags_l = pkt->sts24.state_flags_l;
2442 		sts.state_flags_h = pkt->sts24.state_flags_h;
2443 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2444 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2445 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2446 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2447 		} else {
2448 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2449 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2450 			    SF_GOT_STATUS);
2451 		}
2452 		if (scsi_req.direction & CF_WR) {
2453 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2454 			    SF_DATA_OUT);
2455 		} else if (scsi_req.direction & CF_RD) {
2456 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2457 			    SF_DATA_IN);
2458 		}
2459 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2460 
2461 		/* Setup FCP response info. */
2462 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2463 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2464 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2465 		for (cnt = 0; cnt < sts.rsp_info_length;
2466 		    cnt = (uint16_t)(cnt + 4)) {
2467 			ql_chg_endian(sts.rsp_info + cnt, 4);
2468 		}
2469 
2470 		/* Setup sense data. */
2471 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2472 			sts.req_sense_length =
2473 			    LE_32(pkt->sts24.fcp_sense_length);
2474 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2475 			    SF_ARQ_DONE);
2476 		} else {
2477 			sts.req_sense_length = 0;
2478 		}
2479 		sts.req_sense_data =
2480 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2481 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2482 		    (uintptr_t)sts.req_sense_data);
2483 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2484 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2485 		}
2486 	} else {
2487 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2488 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2489 
2490 		/* Setup residuals. */
2491 		sts.residual_length = LE_32(pkt->sts.residual_length);
2492 
2493 		/* Setup state flags. */
2494 		sts.state_flags_l = pkt->sts.state_flags_l;
2495 		sts.state_flags_h = pkt->sts.state_flags_h;
2496 
2497 		/* Setup FCP response info. */
2498 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2499 		    LE_16(pkt->sts.rsp_info_length) : 0;
2500 		sts.rsp_info = &pkt->sts.rsp_info[0];
2501 
2502 		/* Setup sense data. */
2503 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2504 		    LE_16(pkt->sts.req_sense_length) : 0;
2505 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2506 	}
2507 
2508 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2509 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2510 
2511 	switch (sts.comp_status) {
2512 	case CS_INCOMPLETE:
2513 	case CS_ABORTED:
2514 	case CS_DEVICE_UNAVAILABLE:
2515 	case CS_PORT_UNAVAILABLE:
2516 	case CS_PORT_LOGGED_OUT:
2517 	case CS_PORT_CONFIG_CHG:
2518 	case CS_PORT_BUSY:
2519 	case CS_LOOP_DOWN_ABORT:
2520 		cmd->Status = EXT_STATUS_BUSY;
2521 		break;
2522 	case CS_RESET:
2523 	case CS_QUEUE_FULL:
2524 		cmd->Status = EXT_STATUS_ERR;
2525 		break;
2526 	case CS_TIMEOUT:
2527 		cmd->Status = EXT_STATUS_ERR;
2528 		break;
2529 	case CS_DATA_OVERRUN:
2530 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2531 		break;
2532 	case CS_DATA_UNDERRUN:
2533 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2534 		break;
2535 	}
2536 
2537 	/*
2538 	 * If non data transfer commands fix tranfer counts.
2539 	 */
2540 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2541 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2542 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2543 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2544 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2545 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2546 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2547 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2548 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2549 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2550 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2551 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2552 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2553 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2554 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2555 		/*
2556 		 * Non data transfer command, clear sts_entry residual
2557 		 * length.
2558 		 */
2559 		sts.residual_length = 0;
2560 		cmd->ResponseLen = 0;
2561 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2562 			sts.comp_status = CS_COMPLETE;
2563 			cmd->Status = EXT_STATUS_OK;
2564 		}
2565 	} else {
2566 		cmd->ResponseLen = pld_size;
2567 	}
2568 
2569 	/* Correct ISP completion status */
2570 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2571 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2572 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2573 		    ha->instance);
2574 		scsi_req.resid = 0;
2575 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2576 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2577 		    ha->instance);
2578 		scsi_req.resid = sts.residual_length;
2579 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2580 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2581 
2582 			cmd->ResponseLen = (uint32_t)
2583 			    (pld_size - scsi_req.resid);
2584 		} else {
2585 			EL(ha, "failed, Transfer ERROR\n");
2586 			cmd->Status = EXT_STATUS_ERR;
2587 			cmd->ResponseLen = 0;
2588 		}
2589 	} else {
2590 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2591 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2592 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2593 		    sts.scsi_status_l);
2594 
2595 		scsi_req.resid = pld_size;
2596 		/*
2597 		 * Handle residual count on SCSI check
2598 		 * condition.
2599 		 *
2600 		 * - If Residual Under / Over is set, use the
2601 		 *   Residual Transfer Length field in IOCB.
2602 		 * - If Residual Under / Over is not set, and
2603 		 *   Transferred Data bit is set in State Flags
2604 		 *   field of IOCB, report residual value of 0
2605 		 *   (you may want to do this for tape
2606 		 *   Write-type commands only). This takes care
2607 		 *   of logical end of tape problem and does
2608 		 *   not break Unit Attention.
2609 		 * - If Residual Under / Over is not set, and
2610 		 *   Transferred Data bit is not set in State
2611 		 *   Flags, report residual value equal to
2612 		 *   original data transfer length.
2613 		 */
2614 		if (sts.scsi_status_l & STATUS_CHECK) {
2615 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2616 			cmd->DetailStatus = sts.scsi_status_l;
2617 			if (sts.scsi_status_h &
2618 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2619 				scsi_req.resid = sts.residual_length;
2620 			} else if (sts.state_flags_h &
2621 			    STATE_XFERRED_DATA) {
2622 				scsi_req.resid = 0;
2623 			}
2624 		}
2625 	}
2626 
2627 	if (sts.scsi_status_l & STATUS_CHECK &&
2628 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2629 	    sts.req_sense_length) {
2630 		/*
2631 		 * Check condition with vaild sense data flag set and sense
2632 		 * length != 0
2633 		 */
2634 		if (sts.req_sense_length > scsi_req.sense_length) {
2635 			sense_sz = scsi_req.sense_length;
2636 		} else {
2637 			sense_sz = sts.req_sense_length;
2638 		}
2639 
2640 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2641 		    tq->d_id.b24);
2642 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2643 
2644 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2645 		    (size_t)sense_sz, mode) != 0) {
2646 			EL(ha, "failed, request sense ddi_copyout\n");
2647 		}
2648 
2649 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2650 		cmd->DetailStatus = sts.scsi_status_l;
2651 	}
2652 
2653 	/* Copy response payload from DMA buffer to application. */
2654 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2655 	    cmd->ResponseLen != 0) {
2656 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2657 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2658 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2659 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2660 
2661 		/* Send response payload. */
2662 		if (ql_send_buffer_data(pld,
2663 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2664 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2665 			EL(ha, "failed, send_buffer_data\n");
2666 			cmd->Status = EXT_STATUS_COPY_ERR;
2667 			cmd->ResponseLen = 0;
2668 		}
2669 	}
2670 
2671 	if (cmd->Status != EXT_STATUS_OK) {
2672 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2673 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2674 	} else {
2675 		/*EMPTY*/
2676 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2677 		    ha->instance, cmd->ResponseLen);
2678 	}
2679 
2680 	kmem_free(pkt, pkt_size);
2681 	ql_free_dma_resource(ha, dma_mem);
2682 	kmem_free(dma_mem, sizeof (dma_mem_t));
2683 }
2684 
2685 /*
2686  * ql_wwpn_to_scsiaddr
2687  *
2688  * Input:
2689  *	ha:	adapter state pointer.
2690  *	cmd:	EXT_IOCTL cmd struct pointer.
2691  *	mode:	flags.
2692  *
2693  * Context:
2694  *	Kernel context.
2695  */
2696 static void
2697 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2698 {
2699 	int		status;
2700 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2701 	EXT_SCSI_ADDR	*tmp_addr;
2702 	ql_tgt_t	*tq;
2703 
2704 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2705 
2706 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2707 		/* Return error */
2708 		EL(ha, "incorrect RequestLen\n");
2709 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2710 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2711 		return;
2712 	}
2713 
2714 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2715 	    cmd->RequestLen, mode);
2716 
2717 	if (status != 0) {
2718 		cmd->Status = EXT_STATUS_COPY_ERR;
2719 		EL(ha, "failed, ddi_copyin\n");
2720 		return;
2721 	}
2722 
2723 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2724 
2725 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2726 		/* no matching device */
2727 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2728 		EL(ha, "failed, device not found\n");
2729 		return;
2730 	}
2731 
2732 	/* Copy out the IDs found.  For now we can only return target ID. */
2733 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2734 
2735 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2736 
2737 	if (status != 0) {
2738 		cmd->Status = EXT_STATUS_COPY_ERR;
2739 		EL(ha, "failed, ddi_copyout\n");
2740 	} else {
2741 		cmd->Status = EXT_STATUS_OK;
2742 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2743 	}
2744 }
2745 
2746 /*
2747  * ql_host_idx
2748  *	Gets host order index.
2749  *
2750  * Input:
2751  *	ha:	adapter state pointer.
2752  *	cmd:	EXT_IOCTL cmd struct pointer.
2753  *	mode:	flags.
2754  *
2755  * Returns:
2756  *	None, request status indicated in cmd->Status.
2757  *
2758  * Context:
2759  *	Kernel context.
2760  */
2761 static void
2762 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2763 {
2764 	uint16_t	idx;
2765 
2766 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2767 
2768 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2769 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2770 		cmd->DetailStatus = sizeof (uint16_t);
2771 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2772 		cmd->ResponseLen = 0;
2773 		return;
2774 	}
2775 
2776 	idx = (uint16_t)ha->instance;
2777 
2778 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2779 	    sizeof (uint16_t), mode) != 0) {
2780 		cmd->Status = EXT_STATUS_COPY_ERR;
2781 		cmd->ResponseLen = 0;
2782 		EL(ha, "failed, ddi_copyout\n");
2783 	} else {
2784 		cmd->ResponseLen = sizeof (uint16_t);
2785 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2786 	}
2787 }
2788 
2789 /*
2790  * ql_host_drvname
2791  *	Gets host driver name
2792  *
2793  * Input:
2794  *	ha:	adapter state pointer.
2795  *	cmd:	EXT_IOCTL cmd struct pointer.
2796  *	mode:	flags.
2797  *
2798  * Returns:
2799  *	None, request status indicated in cmd->Status.
2800  *
2801  * Context:
2802  *	Kernel context.
2803  */
2804 static void
2805 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2806 {
2807 
2808 	char		drvname[] = QL_NAME;
2809 	uint32_t	qlnamelen;
2810 
2811 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2812 
2813 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2814 
2815 	if (cmd->ResponseLen < qlnamelen) {
2816 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2817 		cmd->DetailStatus = qlnamelen;
2818 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2819 		    cmd->ResponseLen, qlnamelen);
2820 		cmd->ResponseLen = 0;
2821 		return;
2822 	}
2823 
2824 	if (ddi_copyout((void *)&drvname,
2825 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2826 	    qlnamelen, mode) != 0) {
2827 		cmd->Status = EXT_STATUS_COPY_ERR;
2828 		cmd->ResponseLen = 0;
2829 		EL(ha, "failed, ddi_copyout\n");
2830 	} else {
2831 		cmd->ResponseLen = qlnamelen-1;
2832 	}
2833 
2834 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2835 }
2836 
2837 /*
2838  * ql_read_nvram
2839  *	Get NVRAM contents.
2840  *
2841  * Input:
2842  *	ha:	adapter state pointer.
2843  *	cmd:	EXT_IOCTL cmd struct pointer.
2844  *	mode:	flags.
2845  *
2846  * Returns:
2847  *	None, request status indicated in cmd->Status.
2848  *
2849  * Context:
2850  *	Kernel context.
2851  */
2852 static void
2853 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2854 {
2855 
2856 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2857 
2858 	if (cmd->ResponseLen < ha->nvram_cache->size) {
2859 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2860 		cmd->DetailStatus = ha->nvram_cache->size;
2861 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2862 		    cmd->ResponseLen);
2863 		cmd->ResponseLen = 0;
2864 		return;
2865 	}
2866 
2867 	/* Get NVRAM data. */
2868 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2869 	    mode) != 0) {
2870 		cmd->Status = EXT_STATUS_COPY_ERR;
2871 		cmd->ResponseLen = 0;
2872 		EL(ha, "failed, copy error\n");
2873 	} else {
2874 		cmd->ResponseLen = ha->nvram_cache->size;
2875 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2876 	}
2877 }
2878 
2879 /*
2880  * ql_write_nvram
2881  *	Loads NVRAM contents.
2882  *
2883  * Input:
2884  *	ha:	adapter state pointer.
2885  *	cmd:	EXT_IOCTL cmd struct pointer.
2886  *	mode:	flags.
2887  *
2888  * Returns:
2889  *	None, request status indicated in cmd->Status.
2890  *
2891  * Context:
2892  *	Kernel context.
2893  */
2894 static void
2895 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2896 {
2897 
2898 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2899 
2900 	if (cmd->RequestLen < ha->nvram_cache->size) {
2901 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2902 		cmd->DetailStatus = ha->nvram_cache->size;
2903 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2904 		    cmd->RequestLen);
2905 		return;
2906 	}
2907 
2908 	/* Load NVRAM data. */
2909 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2910 	    mode) != 0) {
2911 		cmd->Status = EXT_STATUS_COPY_ERR;
2912 		EL(ha, "failed, copy error\n");
2913 	} else {
2914 		/*EMPTY*/
2915 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2916 	}
2917 }
2918 
2919 /*
2920  * ql_write_vpd
2921  *	Loads VPD contents.
2922  *
2923  * Input:
2924  *	ha:	adapter state pointer.
2925  *	cmd:	EXT_IOCTL cmd struct pointer.
2926  *	mode:	flags.
2927  *
2928  * Returns:
2929  *	None, request status indicated in cmd->Status.
2930  *
2931  * Context:
2932  *	Kernel context.
2933  */
2934 static void
2935 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2936 {
2937 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2938 
2939 	int32_t		rval = 0;
2940 
2941 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2942 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2943 		EL(ha, "failed, invalid request for HBA\n");
2944 		return;
2945 	}
2946 
2947 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2948 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2949 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2950 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2951 		    cmd->RequestLen);
2952 		return;
2953 	}
2954 
2955 	/* Load VPD data. */
2956 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2957 	    mode)) != 0) {
2958 		cmd->Status = EXT_STATUS_COPY_ERR;
2959 		cmd->DetailStatus = rval;
2960 		EL(ha, "failed, errno=%x\n", rval);
2961 	} else {
2962 		/*EMPTY*/
2963 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2964 	}
2965 }
2966 
2967 /*
2968  * ql_read_vpd
2969  *	Dumps VPD contents.
2970  *
2971  * Input:
2972  *	ha:	adapter state pointer.
2973  *	cmd:	EXT_IOCTL cmd struct pointer.
2974  *	mode:	flags.
2975  *
2976  * Returns:
2977  *	None, request status indicated in cmd->Status.
2978  *
2979  * Context:
2980  *	Kernel context.
2981  */
2982 static void
2983 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2984 {
2985 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2986 
2987 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2988 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2989 		EL(ha, "failed, invalid request for HBA\n");
2990 		return;
2991 	}
2992 
2993 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2994 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2995 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2996 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2997 		    cmd->ResponseLen);
2998 		return;
2999 	}
3000 
3001 	/* Dump VPD data. */
3002 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
3003 	    mode)) != 0) {
3004 		cmd->Status = EXT_STATUS_COPY_ERR;
3005 		EL(ha, "failed,\n");
3006 	} else {
3007 		/*EMPTY*/
3008 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3009 	}
3010 }
3011 
3012 /*
3013  * ql_get_fcache
3014  *	Dumps flash cache contents.
3015  *
3016  * Input:
3017  *	ha:	adapter state pointer.
3018  *	cmd:	EXT_IOCTL cmd struct pointer.
3019  *	mode:	flags.
3020  *
3021  * Returns:
3022  *	None, request status indicated in cmd->Status.
3023  *
3024  * Context:
3025  *	Kernel context.
3026  */
3027 static void
3028 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3029 {
3030 	uint32_t	bsize, boff, types, cpsize, hsize;
3031 	ql_fcache_t	*fptr;
3032 
3033 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3034 
3035 	CACHE_LOCK(ha);
3036 
3037 	if (ha->fcache == NULL) {
3038 		CACHE_UNLOCK(ha);
3039 		cmd->Status = EXT_STATUS_ERR;
3040 		EL(ha, "failed, adapter fcache not setup\n");
3041 		return;
3042 	}
3043 
3044 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
3045 		bsize = 100;
3046 	} else {
3047 		bsize = 400;
3048 	}
3049 
3050 	if (cmd->ResponseLen < bsize) {
3051 		CACHE_UNLOCK(ha);
3052 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3053 		cmd->DetailStatus = bsize;
3054 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3055 		    bsize, cmd->ResponseLen);
3056 		return;
3057 	}
3058 
3059 	boff = 0;
3060 	bsize = 0;
3061 	fptr = ha->fcache;
3062 
3063 	/*
3064 	 * For backwards compatibility, get one of each image type
3065 	 */
3066 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
3067 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3068 		/* Get the next image */
3069 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3070 
3071 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3072 
3073 			if (ddi_copyout(fptr->buf,
3074 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3075 			    cpsize, mode) != 0) {
3076 				CACHE_UNLOCK(ha);
3077 				EL(ha, "ddicopy failed, done\n");
3078 				cmd->Status = EXT_STATUS_COPY_ERR;
3079 				cmd->DetailStatus = 0;
3080 				return;
3081 			}
3082 			boff += 100;
3083 			bsize += cpsize;
3084 			types &= ~(fptr->type);
3085 		}
3086 	}
3087 
3088 	/*
3089 	 * Get the firmware image -- it needs to be last in the
3090 	 * buffer at offset 300 for backwards compatibility. Also for
3091 	 * backwards compatibility, the pci header is stripped off.
3092 	 */
3093 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3094 
3095 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3096 		if (hsize > fptr->buflen) {
3097 			CACHE_UNLOCK(ha);
3098 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3099 			    hsize, fptr->buflen);
3100 			cmd->Status = EXT_STATUS_COPY_ERR;
3101 			cmd->DetailStatus = 0;
3102 			return;
3103 		}
3104 
3105 		cpsize = ((fptr->buflen - hsize) < 100 ?
3106 		    fptr->buflen - hsize : 100);
3107 
3108 		if (ddi_copyout(fptr->buf+hsize,
3109 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3110 		    cpsize, mode) != 0) {
3111 			CACHE_UNLOCK(ha);
3112 			EL(ha, "fw ddicopy failed, done\n");
3113 			cmd->Status = EXT_STATUS_COPY_ERR;
3114 			cmd->DetailStatus = 0;
3115 			return;
3116 		}
3117 		bsize += 100;
3118 	}
3119 
3120 	CACHE_UNLOCK(ha);
3121 	cmd->Status = EXT_STATUS_OK;
3122 	cmd->DetailStatus = bsize;
3123 
3124 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3125 }
3126 
3127 /*
3128  * ql_get_fcache_ex
3129  *	Dumps flash cache contents.
3130  *
3131  * Input:
3132  *	ha:	adapter state pointer.
3133  *	cmd:	EXT_IOCTL cmd struct pointer.
3134  *	mode:	flags.
3135  *
3136  * Returns:
3137  *	None, request status indicated in cmd->Status.
3138  *
3139  * Context:
3140  *	Kernel context.
3141  */
3142 static void
3143 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3144 {
3145 	uint32_t	bsize = 0;
3146 	uint32_t	boff = 0;
3147 	ql_fcache_t	*fptr;
3148 
3149 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3150 
3151 	CACHE_LOCK(ha);
3152 	if (ha->fcache == NULL) {
3153 		CACHE_UNLOCK(ha);
3154 		cmd->Status = EXT_STATUS_ERR;
3155 		EL(ha, "failed, adapter fcache not setup\n");
3156 		return;
3157 	}
3158 
3159 	/* Make sure user passed enough buffer space */
3160 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3161 		bsize += FBUFSIZE;
3162 	}
3163 
3164 	if (cmd->ResponseLen < bsize) {
3165 		CACHE_UNLOCK(ha);
3166 		if (cmd->ResponseLen != 0) {
3167 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3168 			    bsize, cmd->ResponseLen);
3169 		}
3170 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3171 		cmd->DetailStatus = bsize;
3172 		return;
3173 	}
3174 
3175 	boff = 0;
3176 	fptr = ha->fcache;
3177 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3178 		/* Get the next image */
3179 		if (ddi_copyout(fptr->buf,
3180 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3181 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3182 		    mode) != 0) {
3183 			CACHE_UNLOCK(ha);
3184