1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
91 
92 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
94 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
95 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
96     uint8_t);
97 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
99 static int ql_24xx_flash_desc(ql_adapter_state_t *);
100 static int ql_setup_flash(ql_adapter_state_t *);
101 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
102 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
103 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
104     uint32_t, int);
105 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
106     uint8_t);
107 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
110 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
116 static uint32_t ql_setup_led(ql_adapter_state_t *);
117 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
118 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
122 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
123 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
125 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
126 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
127 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
128 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
129 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
131 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
139 static void ql_restart_hba(ql_adapter_state_t *);
140 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
144 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
145 static void ql_update_flash_caches(ql_adapter_state_t *);
146 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
148 
149 /* ******************************************************************** */
150 /*			External IOCTL support.				*/
151 /* ******************************************************************** */
152 
153 /*
154  * ql_alloc_xioctl_resource
155  *	Allocates resources needed by module code.
156  *
157  * Input:
158  *	ha:		adapter state pointer.
159  *
160  * Returns:
161  *	SYS_ERRNO
162  *
163  * Context:
164  *	Kernel context.
165  */
int
ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Idempotent: if the context already exists, leave it in place. */
	if (ha->xioctl != NULL) {
		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
		    ha->instance);
		return (0);
	}

	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
	/*
	 * NOTE(review): per kmem_zalloc(9F), KM_SLEEP blocks until the
	 * allocation succeeds and never returns NULL, so this check (and
	 * the one below) is purely defensive.
	 */
	if (xp == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}
	ha->xioctl = xp;

	/* Allocate AEN tracking buffer */
	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
	if (xp->aen_tracking_queue == NULL) {
		EL(ha, "failed, kmem_zalloc-2\n");
		/* Unwind the partially-built context before failing. */
		ql_free_xioctl_resource(ha);
		return (ENOMEM);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
199 
200 /*
201  * ql_free_xioctl_resource
202  *	Frees resources used by module code.
203  *
204  * Input:
205  *	ha:		adapter state pointer.
206  *
207  * Context:
208  *	Kernel context.
209  */
210 void
211 ql_free_xioctl_resource(ql_adapter_state_t *ha)
212 {
213 	ql_xioctl_t	*xp = ha->xioctl;
214 
215 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
216 
217 	if (xp == NULL) {
218 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
219 		return;
220 	}
221 
222 	if (xp->aen_tracking_queue != NULL) {
223 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
224 		    sizeof (EXT_ASYNC_EVENT));
225 		xp->aen_tracking_queue = NULL;
226 	}
227 
228 	kmem_free(xp, sizeof (ql_xioctl_t));
229 	ha->xioctl = NULL;
230 
231 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
232 }
233 
234 /*
235  * ql_xioctl
236  *	External IOCTL processing.
237  *
238  * Input:
239  *	ha:	adapter state pointer.
240  *	cmd:	function to perform
241  *	arg:	data type varies with request
242  *	mode:	flags
243  *	cred_p:	credentials pointer
244  *	rval_p:	pointer to result value
245  *
246  * Returns:
247  *	0:		success
248  *	ENXIO:		No such device or address
249  *	ENOPROTOOPT:	Protocol not available
250  *
251  * Context:
252  *	Kernel context.
253  */
254 /* ARGSUSED */
int
ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
    cred_t *cred_p, int *rval_p)
{
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);

	/* The xioctl context is built at attach; without it we can't run. */
	if (ha->xioctl == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return (ENXIO);
	}

	/*
	 * All supported external ioctls funnel through ql_sdm_ioctl(),
	 * which copies in the EXT_IOCTL argument and dispatches on cmd.
	 */
	switch (cmd) {
	case EXT_CC_QUERY:
	case EXT_CC_SEND_FCCT_PASSTHRU:
	case EXT_CC_REG_AEN:
	case EXT_CC_GET_AEN:
	case EXT_CC_SEND_SCSI_PASSTHRU:
	case EXT_CC_WWPN_TO_SCSIADDR:
	case EXT_CC_SEND_ELS_RNID:
	case EXT_CC_SET_DATA:
	case EXT_CC_GET_DATA:
	case EXT_CC_HOST_IDX:
	case EXT_CC_READ_NVRAM:
	case EXT_CC_UPDATE_NVRAM:
	case EXT_CC_READ_OPTION_ROM:
	case EXT_CC_READ_OPTION_ROM_EX:
	case EXT_CC_UPDATE_OPTION_ROM:
	case EXT_CC_UPDATE_OPTION_ROM_EX:
	case EXT_CC_GET_VPD:
	case EXT_CC_SET_VPD:
	case EXT_CC_LOOPBACK:
	case EXT_CC_GET_FCACHE:
	case EXT_CC_GET_FCACHE_EX:
	case EXT_CC_HOST_DRVNAME:
	case EXT_CC_GET_SFP_DATA:
	case EXT_CC_PORT_PARAM:
	case EXT_CC_GET_PCI_DATA:
	case EXT_CC_GET_FWEXTTRACE:
	case EXT_CC_GET_FWFCETRACE:
	case EXT_CC_GET_VP_CNT_ID:
	case EXT_CC_VPORT_CMD:
	case EXT_CC_ACCESS_FLASH:
	case EXT_CC_RESET_FW:
	case EXT_CC_MENLO_MANAGE_INFO:
		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
		break;
	default:
		/* function not supported. */
		EL(ha, "function=%d not supported\n", cmd);
		rval = ENOPROTOOPT;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
313 
314 /*
315  * ql_sdm_ioctl
316  *	Provides ioctl functions for SAN/Device Management functions
317  *	AKA External Ioctl functions.
318  *
319  * Input:
320  *	ha:		adapter state pointer.
321  *	ioctl_code:	ioctl function to perform
322  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
323  *	mode:		flags
324  *
325  * Returns:
326  *	0:	success
327  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
328  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
329  *		copyout of EXT_IOCTL status info failed.
330  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
331  *	EBUSY:	Device busy
332  *
333  * Context:
334  *	Kernel context.
335  */
336 static int
337 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
338 {
339 	EXT_IOCTL		*cmd;
340 	int			rval;
341 	ql_adapter_state_t	*vha;
342 
343 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
344 
345 	/* Copy argument structure (EXT_IOCTL) from application land. */
346 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
347 	    ql_validate_signature)) != 0) {
348 		/*
349 		 * a non-zero value at this time means a problem getting
350 		 * the requested information from application land, just
351 		 * return the error code and hope for the best.
352 		 */
353 		EL(ha, "failed, sdm_setup\n");
354 		return (rval);
355 	}
356 
357 	/*
358 	 * Map the physical ha ptr (which the ioctl is called with)
359 	 * to the virtual ha that the caller is addressing.
360 	 */
361 	if (ha->flags & VP_ENABLED) {
362 		/*
363 		 * Special case: HbaSelect == 0 is physical ha
364 		 */
365 		if (cmd->HbaSelect != 0) {
366 			vha = ha->vp_next;
367 			while (vha != NULL) {
368 				if (vha->vp_index == cmd->HbaSelect) {
369 					ha = vha;
370 					break;
371 				}
372 				vha = vha->vp_next;
373 			}
374 
375 			/*
376 			 * If we can't find the specified vp index then
377 			 * we probably have an error (vp indexes shifting
378 			 * under our feet?).
379 			 */
380 			if (vha == NULL) {
381 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
382 				    cmd->HbaSelect);
383 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
384 				cmd->ResponseLen = 0;
385 				return (EFAULT);
386 			}
387 		}
388 	}
389 
390 	/*
391 	 * If driver is suspended, stalled, or powered down rtn BUSY
392 	 */
393 	if (ha->flags & ADAPTER_SUSPENDED ||
394 	    ha->task_daemon_flags & DRIVER_STALL ||
395 	    ha->power_level != PM_LEVEL_D0) {
396 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
397 		    "driver suspended" :
398 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
399 		    "FCA powered down"));
400 		cmd->Status = EXT_STATUS_BUSY;
401 		cmd->ResponseLen = 0;
402 		rval = EBUSY;
403 
404 		/* Return results to caller */
405 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
406 			EL(ha, "failed, sdm_return\n");
407 			rval = EFAULT;
408 		}
409 		return (rval);
410 	}
411 
412 	switch (ioctl_code) {
413 	case EXT_CC_QUERY_OS:
414 		ql_query(ha, cmd, mode);
415 		break;
416 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
417 		ql_fcct(ha, cmd, mode);
418 		break;
419 	case EXT_CC_REG_AEN_OS:
420 		ql_aen_reg(ha, cmd, mode);
421 		break;
422 	case EXT_CC_GET_AEN_OS:
423 		ql_aen_get(ha, cmd, mode);
424 		break;
425 	case EXT_CC_GET_DATA_OS:
426 		ql_get_host_data(ha, cmd, mode);
427 		break;
428 	case EXT_CC_SET_DATA_OS:
429 		ql_set_host_data(ha, cmd, mode);
430 		break;
431 	case EXT_CC_SEND_ELS_RNID_OS:
432 		ql_send_els_rnid(ha, cmd, mode);
433 		break;
434 	case EXT_CC_SCSI_PASSTHRU_OS:
435 		ql_scsi_passthru(ha, cmd, mode);
436 		break;
437 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
438 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
439 		break;
440 	case EXT_CC_HOST_IDX_OS:
441 		ql_host_idx(ha, cmd, mode);
442 		break;
443 	case EXT_CC_HOST_DRVNAME_OS:
444 		ql_host_drvname(ha, cmd, mode);
445 		break;
446 	case EXT_CC_READ_NVRAM_OS:
447 		ql_read_nvram(ha, cmd, mode);
448 		break;
449 	case EXT_CC_UPDATE_NVRAM_OS:
450 		ql_write_nvram(ha, cmd, mode);
451 		break;
452 	case EXT_CC_READ_OPTION_ROM_OS:
453 	case EXT_CC_READ_OPTION_ROM_EX_OS:
454 		ql_read_flash(ha, cmd, mode);
455 		break;
456 	case EXT_CC_UPDATE_OPTION_ROM_OS:
457 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
458 		ql_write_flash(ha, cmd, mode);
459 		break;
460 	case EXT_CC_LOOPBACK_OS:
461 		ql_diagnostic_loopback(ha, cmd, mode);
462 		break;
463 	case EXT_CC_GET_VPD_OS:
464 		ql_read_vpd(ha, cmd, mode);
465 		break;
466 	case EXT_CC_SET_VPD_OS:
467 		ql_write_vpd(ha, cmd, mode);
468 		break;
469 	case EXT_CC_GET_FCACHE_OS:
470 		ql_get_fcache(ha, cmd, mode);
471 		break;
472 	case EXT_CC_GET_FCACHE_EX_OS:
473 		ql_get_fcache_ex(ha, cmd, mode);
474 		break;
475 	case EXT_CC_GET_SFP_DATA_OS:
476 		ql_get_sfp(ha, cmd, mode);
477 		break;
478 	case EXT_CC_PORT_PARAM_OS:
479 		ql_port_param(ha, cmd, mode);
480 		break;
481 	case EXT_CC_GET_PCI_DATA_OS:
482 		ql_get_pci_data(ha, cmd, mode);
483 		break;
484 	case EXT_CC_GET_FWEXTTRACE_OS:
485 		ql_get_fwexttrace(ha, cmd, mode);
486 		break;
487 	case EXT_CC_GET_FWFCETRACE_OS:
488 		ql_get_fwfcetrace(ha, cmd, mode);
489 		break;
490 	case EXT_CC_MENLO_RESET:
491 		ql_menlo_reset(ha, cmd, mode);
492 		break;
493 	case EXT_CC_MENLO_GET_FW_VERSION:
494 		ql_menlo_get_fw_version(ha, cmd, mode);
495 		break;
496 	case EXT_CC_MENLO_UPDATE_FW:
497 		ql_menlo_update_fw(ha, cmd, mode);
498 		break;
499 	case EXT_CC_MENLO_MANAGE_INFO:
500 		ql_menlo_manage_info(ha, cmd, mode);
501 		break;
502 	case EXT_CC_GET_VP_CNT_ID_OS:
503 		ql_get_vp_cnt_id(ha, cmd, mode);
504 		break;
505 	case EXT_CC_VPORT_CMD_OS:
506 		ql_vp_ioctl(ha, cmd, mode);
507 		break;
508 	case EXT_CC_ACCESS_FLASH_OS:
509 		ql_access_flash(ha, cmd, mode);
510 		break;
511 	case EXT_CC_RESET_FW_OS:
512 		ql_reset_cmd(ha, cmd);
513 		break;
514 	default:
515 		/* function not supported. */
516 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
517 
518 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
519 		cmd->ResponseLen = 0;
520 		break;
521 	}
522 
523 	/* Return results to caller */
524 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
525 		EL(ha, "failed, sdm_return\n");
526 		return (EFAULT);
527 	}
528 
529 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
530 
531 	return (0);
532 }
533 
534 /*
535  * ql_sdm_setup
536  *	Make a local copy of the EXT_IOCTL struct and validate it.
537  *
538  * Input:
539  *	ha:		adapter state pointer.
540  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
541  *	arg:		Address of application EXT_IOCTL cmd data
542  *	mode:		flags
543  *	val_sig:	Pointer to a function to validate the ioctl signature.
544  *
545  * Returns:
546  *	0:		success
547  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
548  *	EINVAL:		Invalid version, signature.
549  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
550  *
551  * Context:
552  *	Kernel context.
553  */
static int
ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
    int mode, boolean_t (*val_sig)(EXT_IOCTL *))
{
	int		rval;
	EXT_IOCTL	*cmd;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Allocate local memory for EXT_IOCTL.
	 * On success, ownership of this buffer passes to the caller via
	 * *cmd_struct; the caller releases it through ql_sdm_return().
	 */
	*cmd_struct = NULL;
	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
	/*
	 * NOTE(review): per kmem_zalloc(9F), KM_SLEEP never returns NULL;
	 * this check is defensive only.
	 */
	if (cmd == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}
	/* Get argument structure. */
	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyin\n");
		rval = EFAULT;
	} else {
		/*
		 * Check signature and the version.
		 * If either are not valid then neither is the
		 * structure so don't attempt to return any error status
		 * because we can't trust what caller's arg points to.
		 * Just return the errno.
		 */
		if (val_sig(cmd) == 0) {
			EL(ha, "failed, signature\n");
			rval = EINVAL;
		} else if (cmd->Version > EXT_VERSION) {
			EL(ha, "failed, version\n");
			rval = EINVAL;
		}
	}

	if (rval == 0) {
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
		*cmd_struct = cmd;
		/* Preset optimistic status; handlers overwrite on error. */
		cmd->Status = EXT_STATUS_OK;
		cmd->DetailStatus = 0;
	} else {
		/* Validation failed: nothing is handed back, free locally. */
		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
	}

	return (rval);
}
603 
604 /*
605  * ql_validate_signature
606  *	Validate the signature string for an external ioctl call.
607  *
608  * Input:
609  *	sg:	Pointer to EXT_IOCTL signature to validate.
610  *
611  * Returns:
612  *	B_TRUE:		Signature is valid.
613  *	B_FALSE:	Signature is NOT valid.
614  *
615  * Context:
616  *	Kernel context.
617  */
618 static boolean_t
619 ql_validate_signature(EXT_IOCTL *cmd_struct)
620 {
621 	/*
622 	 * Check signature.
623 	 *
624 	 * If signature is not valid then neither is the rest of
625 	 * the structure (e.g., can't trust it), so don't attempt
626 	 * to return any error status other than the errno.
627 	 */
628 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
629 		QL_PRINT_2(CE_CONT, "failed,\n");
630 		return (B_FALSE);
631 	}
632 
633 	return (B_TRUE);
634 }
635 
636 /*
637  * ql_sdm_return
638  *	Copies return data/status to application land for
639  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
640  *
641  * Input:
642  *	ha:		adapter state pointer.
643  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
644  *	ioctl_code:	ioctl function to perform
645  *	arg:		EXT_IOCTL cmd data in application land.
646  *	mode:		flags
647  *
648  * Returns:
649  *	0:	success
650  *	EFAULT:	Copy out error.
651  *
652  * Context:
653  *	Kernel context.
654  */
655 /* ARGSUSED */
656 static int
657 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
658 {
659 	int	rval = 0;
660 
661 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
662 
663 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
664 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
665 	    mode);
666 
667 	rval |= ddi_copyout((void *)&cmd->Status,
668 	    (void *)&(((EXT_IOCTL*)arg)->Status),
669 	    sizeof (cmd->Status), mode);
670 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
671 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
672 	    sizeof (cmd->DetailStatus), mode);
673 
674 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
675 
676 	if (rval != 0) {
677 		/* Some copyout operation failed */
678 		EL(ha, "failed, ddi_copyout\n");
679 		return (EFAULT);
680 	}
681 
682 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
683 
684 	return (0);
685 }
686 
687 /*
688  * ql_query
689  *	Performs all EXT_CC_QUERY functions.
690  *
691  * Input:
692  *	ha:	adapter state pointer.
693  *	cmd:	Local EXT_IOCTL cmd struct pointer.
694  *	mode:	flags.
695  *
696  * Returns:
697  *	None, request status indicated in cmd->Status.
698  *
699  * Context:
700  *	Kernel context.
701  */
702 static void
703 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
704 {
705 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
706 	    cmd->SubCode);
707 
708 	/* case off on command subcode */
709 	switch (cmd->SubCode) {
710 	case EXT_SC_QUERY_HBA_NODE:
711 		ql_qry_hba_node(ha, cmd, mode);
712 		break;
713 	case EXT_SC_QUERY_HBA_PORT:
714 		ql_qry_hba_port(ha, cmd, mode);
715 		break;
716 	case EXT_SC_QUERY_DISC_PORT:
717 		ql_qry_disc_port(ha, cmd, mode);
718 		break;
719 	case EXT_SC_QUERY_DISC_TGT:
720 		ql_qry_disc_tgt(ha, cmd, mode);
721 		break;
722 	case EXT_SC_QUERY_DRIVER:
723 		ql_qry_driver(ha, cmd, mode);
724 		break;
725 	case EXT_SC_QUERY_FW:
726 		ql_qry_fw(ha, cmd, mode);
727 		break;
728 	case EXT_SC_QUERY_CHIP:
729 		ql_qry_chip(ha, cmd, mode);
730 		break;
731 	case EXT_SC_QUERY_CNA_PORT:
732 		ql_qry_cna_port(ha, cmd, mode);
733 		break;
734 	case EXT_SC_QUERY_DISC_LUN:
735 	default:
736 		/* function not supported. */
737 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
738 		EL(ha, "failed, Unsupported Subcode=%xh\n",
739 		    cmd->SubCode);
740 		break;
741 	}
742 
743 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
744 }
745 
746 /*
747  * ql_qry_hba_node
748  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
749  *
750  * Input:
751  *	ha:	adapter state pointer.
752  *	cmd:	EXT_IOCTL cmd struct pointer.
753  *	mode:	flags.
754  *
755  * Returns:
756  *	None, request status indicated in cmd->Status.
757  *
758  * Context:
759  *	Kernel context.
760  */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;
	ql_mbx_data_t	mr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must hold a full EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Last 3 WWNN bytes double as the reported serial number. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	/* SBUS cards: append the FPGA revision to the driver version. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		verlen = strlen((char *)(tmp_node.DriverVersion));
		/*
		 * NOTE(review): the FPGA string "d.d" can be up to 5 chars
		 * ("15.15") + NUL, written starting at verlen+1, i.e. up to
		 * index verlen+7; this guard only ensures verlen+5 fits -
		 * confirm EXT_DEF_MAX_STR_SIZE headroom.
		 */
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			/*
			 * NOTE(review): verlen+1 places the FPGA revision
			 * AFTER the NUL terminator of DriverVersion, so
			 * strlen-based consumers will not see it; verify
			 * whether a separator at verlen was intended.
			 * Also, the address of an array element is never
			 * NULL, so the check below is dead code.
			 */
			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}
	(void) ql_get_fw_version(ha, &mr);

	/* Firmware version major.minor.subminor from mailbox 1-3. */
	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    mr.mb[1], mr.mb[2], mr.mb[3]);

	/* Pre-24xx/25xx/81xx chips report a firmware attribute in mb[6]. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		switch (mr.mb[6]) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
			break;
		}
	}

	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate and force NUL termination. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		/* Property value was allocated by ddi_getlongprop(). */
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
877 
878 /*
879  * ql_qry_hba_port
880  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
881  *
882  * Input:
883  *	ha:	adapter state pointer.
884  *	cmd:	EXT_IOCTL cmd struct pointer.
885  *	mode:	flags.
886  *
887  * Returns:
888  *	None, request status indicated in cmd->Status.
889  *
890  * Context:
891  *	Kernel context.
892  */
static void
ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	ql_mbx_data_t	mr;
	EXT_HBA_PORT	tmp_port = {0};
	int		rval;
	uint16_t	port_cnt, tgt_cnt, index;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must hold a full EXT_HBA_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
	    EXT_DEF_WWN_NAME_SIZE);
	/* 24-bit FC port id: domain/area/al_pa, byte 0 reserved. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = ha->d_id.b.domain;
	tmp_port.Id[2] = ha->d_id.b.area;
	tmp_port.Id[3] = ha->d_id.b.al_pa;

	/* For now we are initiator only driver */
	tmp_port.Type = EXT_DEF_INITIATOR_DEV;

	if (ha->task_daemon_flags & LOOP_DOWN) {
		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
	} else if (DRIVER_SUSPENDED(ha)) {
		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
	} else {
		tmp_port.State = EXT_DEF_HBA_OK;
	}

	if (ha->flags & POINT_TO_POINT) {
		tmp_port.Mode = EXT_DEF_P2P_MODE;
	} else {
		tmp_port.Mode = EXT_DEF_LOOP_MODE;
	}
	/*
	 * fill in the portspeed values.
	 *
	 * default to not yet negotiated state
	 */
	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;

	/*
	 * Only query the firmware for the data rate when the link is up;
	 * 2200-series chips have no data-rate mailbox and run at 1Gb.
	 */
	if (tmp_port.State == EXT_DEF_HBA_OK) {
		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
			mr.mb[1] = 0;
			mr.mb[2] = 0;
			rval = ql_data_rate(ha, &mr);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, data_rate=%xh\n", rval);
			} else {
				switch (mr.mb[1]) {
				case IIDMA_RATE_1GB:
					tmp_port.PortSpeed =
					    EXT_DEF_PORTSPEED_1GBIT;
					break;
				case IIDMA_RATE_2GB:
					tmp_port.PortSpeed =
					    EXT_DEF_PORTSPEED_2GBIT;
					break;
				case IIDMA_RATE_4GB:
					tmp_port.PortSpeed =
					    EXT_DEF_PORTSPEED_4GBIT;
					break;
				case IIDMA_RATE_8GB:
					tmp_port.PortSpeed =
					    EXT_DEF_PORTSPEED_8GBIT;
					break;
				case IIDMA_RATE_10GB:
					tmp_port.PortSpeed =
					    EXT_DEF_PORTSPEED_10GBIT;
					break;
				default:
					tmp_port.PortSpeed =
					    EXT_DEF_PORTSPEED_UNKNOWN;
					EL(ha, "failed, data rate=%xh\n",
					    mr.mb[1]);
					break;
				}
			}
		} else {
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
		}
	}

	/* Report all supported port speeds */
	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
		    EXT_DEF_PORTSPEED_1GBIT);
		/*
		 * Correct supported speeds based on type of
		 * sfp that is present
		 */
		switch (ha->sfp_stat) {
		case 1:
			/* no sfp detected */
			break;
		case 2:
		case 4:
			/* 4GB sfp */
			tmp_port.PortSupportedSpeed &=
			    ~EXT_DEF_PORTSPEED_8GBIT;
			break;
		case 3:
		case 5:
			/* 8GB sfp */
			tmp_port.PortSupportedSpeed &=
			    ~EXT_DEF_PORTSPEED_1GBIT;
			break;
		default:
			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
			break;

		}
	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
		    EXT_DEF_PORTSPEED_1GBIT);
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
	} else {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
	}
	tmp_port.LinkState2 = LSB(ha->sfp_stat);
	port_cnt = 0;
	tgt_cnt = 0;

	/* Walk the device hash; count valid ports and non-initiators. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}

			port_cnt++;
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				tgt_cnt++;
			}
		}
	}

	tmp_port.DiscPortCount = port_cnt;
	tmp_port.DiscTargetCount = tgt_cnt;

	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;

	rval = ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_PORT), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
		    ha->instance, port_cnt, tgt_cnt);
	}
}
1072 
1073 /*
1074  * ql_qry_disc_port
1075  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1076  *
1077  * Input:
1078  *	ha:	adapter state pointer.
1079  *	cmd:	EXT_IOCTL cmd struct pointer.
1080  *	mode:	flags.
1081  *
1082  *	cmd->Instance = Port instance in fcport chain.
1083  *
1084  * Returns:
1085  *	None, request status indicated in cmd->Status.
1086  *
1087  * Context:
1088  *	Kernel context.
1089  */
1090 static void
1091 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1092 {
1093 	EXT_DISC_PORT	tmp_port = {0};
1094 	ql_link_t	*link;
1095 	ql_tgt_t	*tq;
1096 	uint16_t	index;
1097 	uint16_t	inst = 0;
1098 
1099 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1100 
1101 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1102 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1103 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1104 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1105 		    cmd->ResponseLen);
1106 		cmd->ResponseLen = 0;
1107 		return;
1108 	}
1109 
1110 	for (link = NULL, index = 0;
1111 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1112 		for (link = ha->dev[index].first; link != NULL;
1113 		    link = link->next) {
1114 			tq = link->base_address;
1115 
1116 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1117 				continue;
1118 			}
1119 			if (inst != cmd->Instance) {
1120 				inst++;
1121 				continue;
1122 			}
1123 
1124 			/* fill in the values */
1125 			bcopy(tq->node_name, tmp_port.WWNN,
1126 			    EXT_DEF_WWN_NAME_SIZE);
1127 			bcopy(tq->port_name, tmp_port.WWPN,
1128 			    EXT_DEF_WWN_NAME_SIZE);
1129 
1130 			break;
1131 		}
1132 	}
1133 
1134 	if (link == NULL) {
1135 		/* no matching device */
1136 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1137 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1138 		cmd->ResponseLen = 0;
1139 		return;
1140 	}
1141 
1142 	tmp_port.Id[0] = 0;
1143 	tmp_port.Id[1] = tq->d_id.b.domain;
1144 	tmp_port.Id[2] = tq->d_id.b.area;
1145 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1146 
1147 	tmp_port.Type = 0;
1148 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1149 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1150 		    EXT_DEF_INITIATOR_DEV);
1151 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1152 		(void) ql_inq_scan(ha, tq, 1);
1153 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1154 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1155 	}
1156 
1157 	if (tq->flags & TQF_FABRIC_DEVICE) {
1158 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1159 	} else {
1160 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1161 	}
1162 
1163 	tmp_port.Status = 0;
1164 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1165 
1166 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1167 
1168 	if (ddi_copyout((void *)&tmp_port,
1169 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1170 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1171 		cmd->Status = EXT_STATUS_COPY_ERR;
1172 		cmd->ResponseLen = 0;
1173 		EL(ha, "failed, ddi_copyout\n");
1174 	} else {
1175 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1176 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1177 	}
1178 }
1179 
1180 /*
1181  * ql_qry_disc_tgt
1182  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1183  *
1184  * Input:
1185  *	ha:		adapter state pointer.
1186  *	cmd:		EXT_IOCTL cmd struct pointer.
1187  *	mode:		flags.
1188  *
1189  *	cmd->Instance = Port instance in fcport chain.
1190  *
1191  * Returns:
1192  *	None, request status indicated in cmd->Status.
1193  *
1194  * Context:
1195  *	Kernel context.
1196  */
1197 static void
1198 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1199 {
1200 	EXT_DISC_TARGET	tmp_tgt = {0};
1201 	ql_link_t	*link;
1202 	ql_tgt_t	*tq;
1203 	uint16_t	index;
1204 	uint16_t	inst = 0;
1205 
1206 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1207 	    cmd->Instance);
1208 
1209 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1210 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1211 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1212 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1213 		    cmd->ResponseLen);
1214 		cmd->ResponseLen = 0;
1215 		return;
1216 	}
1217 
1218 	/* Scan port list for requested target and fill in the values */
1219 	for (link = NULL, index = 0;
1220 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1221 		for (link = ha->dev[index].first; link != NULL;
1222 		    link = link->next) {
1223 			tq = link->base_address;
1224 
1225 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1226 			    tq->flags & TQF_INITIATOR_DEVICE) {
1227 				continue;
1228 			}
1229 			if (inst != cmd->Instance) {
1230 				inst++;
1231 				continue;
1232 			}
1233 
1234 			/* fill in the values */
1235 			bcopy(tq->node_name, tmp_tgt.WWNN,
1236 			    EXT_DEF_WWN_NAME_SIZE);
1237 			bcopy(tq->port_name, tmp_tgt.WWPN,
1238 			    EXT_DEF_WWN_NAME_SIZE);
1239 
1240 			break;
1241 		}
1242 	}
1243 
1244 	if (link == NULL) {
1245 		/* no matching device */
1246 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1247 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1248 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1249 		cmd->ResponseLen = 0;
1250 		return;
1251 	}
1252 	tmp_tgt.Id[0] = 0;
1253 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1254 	tmp_tgt.Id[2] = tq->d_id.b.area;
1255 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1256 
1257 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1258 
1259 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1260 		(void) ql_inq_scan(ha, tq, 1);
1261 	}
1262 
1263 	tmp_tgt.Type = 0;
1264 	if (tq->flags & TQF_TAPE_DEVICE) {
1265 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1266 	}
1267 
1268 	if (tq->flags & TQF_FABRIC_DEVICE) {
1269 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1270 	} else {
1271 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1272 	}
1273 
1274 	tmp_tgt.Status = 0;
1275 
1276 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1277 
1278 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1279 
1280 	if (ddi_copyout((void *)&tmp_tgt,
1281 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1282 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1283 		cmd->Status = EXT_STATUS_COPY_ERR;
1284 		cmd->ResponseLen = 0;
1285 		EL(ha, "failed, ddi_copyout\n");
1286 	} else {
1287 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1288 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1289 	}
1290 }
1291 
1292 /*
1293  * ql_qry_fw
1294  *	Performs EXT_SC_QUERY_FW subfunction.
1295  *
1296  * Input:
1297  *	ha:	adapter state pointer.
1298  *	cmd:	EXT_IOCTL cmd struct pointer.
1299  *	mode:	flags.
1300  *
1301  * Returns:
1302  *	None, request status indicated in cmd->Status.
1303  *
1304  * Context:
1305  *	Kernel context.
1306  */
1307 static void
1308 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1309 {
1310 	ql_mbx_data_t	mr;
1311 	EXT_FW		fw_info = {0};
1312 
1313 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1314 
1315 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1316 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1317 		cmd->DetailStatus = sizeof (EXT_FW);
1318 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1319 		    cmd->ResponseLen);
1320 		cmd->ResponseLen = 0;
1321 		return;
1322 	}
1323 
1324 	(void) ql_get_fw_version(ha, &mr);
1325 
1326 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1327 	    mr.mb[2], mr.mb[2]);
1328 
1329 	fw_info.Attrib = mr.mb[6];
1330 
1331 	if (ddi_copyout((void *)&fw_info,
1332 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1333 	    sizeof (EXT_FW), mode) != 0) {
1334 		cmd->Status = EXT_STATUS_COPY_ERR;
1335 		cmd->ResponseLen = 0;
1336 		EL(ha, "failed, ddi_copyout\n");
1337 		return;
1338 	} else {
1339 		cmd->ResponseLen = sizeof (EXT_FW);
1340 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1341 	}
1342 }
1343 
1344 /*
1345  * ql_qry_chip
1346  *	Performs EXT_SC_QUERY_CHIP subfunction.
1347  *
1348  * Input:
1349  *	ha:	adapter state pointer.
1350  *	cmd:	EXT_IOCTL cmd struct pointer.
1351  *	mode:	flags.
1352  *
1353  * Returns:
1354  *	None, request status indicated in cmd->Status.
1355  *
1356  * Context:
1357  *	Kernel context.
1358  */
1359 static void
1360 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1361 {
1362 	EXT_CHIP	chip = {0};
1363 
1364 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1365 
1366 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1367 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1368 		cmd->DetailStatus = sizeof (EXT_CHIP);
1369 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1370 		    cmd->ResponseLen);
1371 		cmd->ResponseLen = 0;
1372 		return;
1373 	}
1374 
1375 	chip.VendorId = ha->ven_id;
1376 	chip.DeviceId = ha->device_id;
1377 	chip.SubVendorId = ha->subven_id;
1378 	chip.SubSystemId = ha->subsys_id;
1379 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1380 	chip.IoAddrLen = 0x100;
1381 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1382 	chip.MemAddrLen = 0x100;
1383 	chip.ChipRevID = ha->rev_id;
1384 	if (ha->flags & FUNCTION_1) {
1385 		chip.FuncNo = 1;
1386 	}
1387 
1388 	if (ddi_copyout((void *)&chip,
1389 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1390 	    sizeof (EXT_CHIP), mode) != 0) {
1391 		cmd->Status = EXT_STATUS_COPY_ERR;
1392 		cmd->ResponseLen = 0;
1393 		EL(ha, "failed, ddi_copyout\n");
1394 	} else {
1395 		cmd->ResponseLen = sizeof (EXT_CHIP);
1396 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1397 	}
1398 }
1399 
1400 /*
1401  * ql_qry_driver
1402  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1403  *
1404  * Input:
1405  *	ha:	adapter state pointer.
1406  *	cmd:	EXT_IOCTL cmd struct pointer.
1407  *	mode:	flags.
1408  *
1409  * Returns:
1410  *	None, request status indicated in cmd->Status.
1411  *
1412  * Context:
1413  *	Kernel context.
1414  */
1415 static void
1416 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1417 {
1418 	EXT_DRIVER	qd = {0};
1419 
1420 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1421 
1422 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1423 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1424 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1425 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1426 		    cmd->ResponseLen);
1427 		cmd->ResponseLen = 0;
1428 		return;
1429 	}
1430 
1431 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1432 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1433 	qd.TargetsPerBus = (uint16_t)
1434 	    (CFG_IST(ha, (CFG_CTRL_242581 | CFG_EXT_FW_INTERFACE)) ?
1435 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1436 	qd.LunsPerTarget = 2030;
1437 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1438 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1439 
1440 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1441 	    sizeof (EXT_DRIVER), mode) != 0) {
1442 		cmd->Status = EXT_STATUS_COPY_ERR;
1443 		cmd->ResponseLen = 0;
1444 		EL(ha, "failed, ddi_copyout\n");
1445 	} else {
1446 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1447 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1448 	}
1449 }
1450 
1451 /*
1452  * ql_fcct
1453  *	IOCTL management server FC-CT passthrough.
1454  *
1455  * Input:
1456  *	ha:	adapter state pointer.
1457  *	cmd:	User space CT arguments pointer.
1458  *	mode:	flags.
1459  *
1460  * Returns:
1461  *	None, request status indicated in cmd->Status.
1462  *
1463  * Context:
1464  *	Kernel context.
1465  */
1466 static void
1467 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1468 {
1469 	ql_mbx_iocb_t		*pkt;
1470 	ql_mbx_data_t		mr;
1471 	dma_mem_t		*dma_mem;
1472 	caddr_t			pld;
1473 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1474 	int			rval;
1475 	ql_ct_iu_preamble_t	*ct;
1476 	ql_xioctl_t		*xp = ha->xioctl;
1477 	ql_tgt_t		tq;
1478 	uint16_t		comp_status, loop_id;
1479 
1480 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1481 
1482 	/* Get CT argument structure. */
1483 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1484 		EL(ha, "failed, No switch\n");
1485 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1486 		cmd->ResponseLen = 0;
1487 		return;
1488 	}
1489 
1490 	if (DRIVER_SUSPENDED(ha)) {
1491 		EL(ha, "failed, LOOP_NOT_READY\n");
1492 		cmd->Status = EXT_STATUS_BUSY;
1493 		cmd->ResponseLen = 0;
1494 		return;
1495 	}
1496 
1497 	/* Login management server device. */
1498 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1499 		tq.d_id.b.al_pa = 0xfa;
1500 		tq.d_id.b.area = 0xff;
1501 		tq.d_id.b.domain = 0xff;
1502 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1503 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1504 		    MANAGEMENT_SERVER_LOOP_ID);
1505 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1506 		if (rval != QL_SUCCESS) {
1507 			EL(ha, "failed, server login\n");
1508 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1509 			cmd->ResponseLen = 0;
1510 			return;
1511 		} else {
1512 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1513 		}
1514 	}
1515 
1516 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1517 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1518 
1519 	/* Allocate a DMA Memory Descriptor */
1520 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1521 	if (dma_mem == NULL) {
1522 		EL(ha, "failed, kmem_zalloc\n");
1523 		cmd->Status = EXT_STATUS_NO_MEMORY;
1524 		cmd->ResponseLen = 0;
1525 		return;
1526 	}
1527 	/* Determine maximum buffer size. */
1528 	if (cmd->RequestLen < cmd->ResponseLen) {
1529 		pld_byte_cnt = cmd->ResponseLen;
1530 	} else {
1531 		pld_byte_cnt = cmd->RequestLen;
1532 	}
1533 
1534 	/* Allocate command block. */
1535 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1536 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1537 	if (pkt == NULL) {
1538 		EL(ha, "failed, kmem_zalloc\n");
1539 		cmd->Status = EXT_STATUS_NO_MEMORY;
1540 		cmd->ResponseLen = 0;
1541 		return;
1542 	}
1543 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1544 
1545 	/* Get command payload data. */
1546 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1547 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1548 		EL(ha, "failed, get_buffer_data\n");
1549 		kmem_free(pkt, pkt_size);
1550 		cmd->Status = EXT_STATUS_COPY_ERR;
1551 		cmd->ResponseLen = 0;
1552 		return;
1553 	}
1554 
1555 	/* Get DMA memory for the IOCB */
1556 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1557 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1558 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1559 		    "alloc failed", QL_NAME, ha->instance);
1560 		kmem_free(pkt, pkt_size);
1561 		kmem_free(dma_mem, sizeof (dma_mem_t));
1562 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1563 		cmd->ResponseLen = 0;
1564 		return;
1565 	}
1566 
1567 	/* Copy out going payload data to IOCB DMA buffer. */
1568 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1569 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1570 
1571 	/* Sync IOCB DMA buffer. */
1572 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1573 	    DDI_DMA_SYNC_FORDEV);
1574 
1575 	/*
1576 	 * Setup IOCB
1577 	 */
1578 	ct = (ql_ct_iu_preamble_t *)pld;
1579 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1580 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1581 		pkt->ms24.entry_count = 1;
1582 
1583 		/* Set loop ID */
1584 		pkt->ms24.n_port_hdl = (uint16_t)
1585 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1586 		    LE_16(SNS_24XX_HDL) :
1587 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1588 
1589 		/* Set ISP command timeout. */
1590 		pkt->ms24.timeout = LE_16(120);
1591 
1592 		/* Set cmd/response data segment counts. */
1593 		pkt->ms24.cmd_dseg_count = LE_16(1);
1594 		pkt->ms24.resp_dseg_count = LE_16(1);
1595 
1596 		/* Load ct cmd byte count. */
1597 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1598 
1599 		/* Load ct rsp byte count. */
1600 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1601 
1602 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1603 
1604 		/* Load MS command entry data segments. */
1605 		*long_ptr++ = (uint32_t)
1606 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1607 		*long_ptr++ = (uint32_t)
1608 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1609 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1610 
1611 		/* Load MS response entry data segments. */
1612 		*long_ptr++ = (uint32_t)
1613 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1614 		*long_ptr++ = (uint32_t)
1615 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1616 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1617 
1618 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1619 		    sizeof (ql_mbx_iocb_t));
1620 
1621 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1622 		if (comp_status == CS_DATA_UNDERRUN) {
1623 			if ((BE_16(ct->max_residual_size)) == 0) {
1624 				comp_status = CS_COMPLETE;
1625 			}
1626 		}
1627 
1628 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1629 		    0) {
1630 			EL(ha, "failed, I/O timeout or "
1631 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1632 			    pkt->sts24.entry_status,
1633 			    pkt->sts24.scsi_status_l, rval);
1634 			kmem_free(pkt, pkt_size);
1635 			ql_free_dma_resource(ha, dma_mem);
1636 			kmem_free(dma_mem, sizeof (dma_mem_t));
1637 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1638 			cmd->ResponseLen = 0;
1639 			return;
1640 		}
1641 	} else {
1642 		pkt->ms.entry_type = MS_TYPE;
1643 		pkt->ms.entry_count = 1;
1644 
1645 		/* Set loop ID */
1646 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1647 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1648 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1649 			pkt->ms.loop_id_l = LSB(loop_id);
1650 			pkt->ms.loop_id_h = MSB(loop_id);
1651 		} else {
1652 			pkt->ms.loop_id_h = LSB(loop_id);
1653 		}
1654 
1655 		/* Set ISP command timeout. */
1656 		pkt->ms.timeout = LE_16(120);
1657 
1658 		/* Set data segment counts. */
1659 		pkt->ms.cmd_dseg_count_l = 1;
1660 		pkt->ms.total_dseg_count = LE_16(2);
1661 
1662 		/* Response total byte count. */
1663 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1664 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1665 
1666 		/* Command total byte count. */
1667 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1668 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1669 
1670 		/* Load command/response data segments. */
1671 		pkt->ms.dseg_0_address[0] = (uint32_t)
1672 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1673 		pkt->ms.dseg_0_address[1] = (uint32_t)
1674 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1675 		pkt->ms.dseg_1_address[0] = (uint32_t)
1676 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1677 		pkt->ms.dseg_1_address[1] = (uint32_t)
1678 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1679 
1680 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1681 		    sizeof (ql_mbx_iocb_t));
1682 
1683 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1684 		if (comp_status == CS_DATA_UNDERRUN) {
1685 			if ((BE_16(ct->max_residual_size)) == 0) {
1686 				comp_status = CS_COMPLETE;
1687 			}
1688 		}
1689 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1690 			EL(ha, "failed, I/O timeout or "
1691 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1692 			kmem_free(pkt, pkt_size);
1693 			ql_free_dma_resource(ha, dma_mem);
1694 			kmem_free(dma_mem, sizeof (dma_mem_t));
1695 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1696 			cmd->ResponseLen = 0;
1697 			return;
1698 		}
1699 	}
1700 
1701 	/* Sync in coming DMA buffer. */
1702 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1703 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1704 	/* Copy in coming DMA data. */
1705 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1706 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1707 	    DDI_DEV_AUTOINCR);
1708 
1709 	/* Copy response payload from DMA buffer to application. */
1710 	if (cmd->ResponseLen != 0) {
1711 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1712 		    cmd->ResponseLen);
1713 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1714 
1715 		/* Send response payload. */
1716 		if (ql_send_buffer_data(pld,
1717 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1718 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1719 			EL(ha, "failed, send_buffer_data\n");
1720 			cmd->Status = EXT_STATUS_COPY_ERR;
1721 			cmd->ResponseLen = 0;
1722 		}
1723 	}
1724 
1725 	kmem_free(pkt, pkt_size);
1726 	ql_free_dma_resource(ha, dma_mem);
1727 	kmem_free(dma_mem, sizeof (dma_mem_t));
1728 
1729 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1730 }
1731 
1732 /*
1733  * ql_aen_reg
1734  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1735  *
1736  * Input:
1737  *	ha:	adapter state pointer.
1738  *	cmd:	EXT_IOCTL cmd struct pointer.
1739  *	mode:	flags.
1740  *
1741  * Returns:
1742  *	None, request status indicated in cmd->Status.
1743  *
1744  * Context:
1745  *	Kernel context.
1746  */
1747 static void
1748 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1749 {
1750 	EXT_REG_AEN	reg_struct;
1751 	int		rval = 0;
1752 	ql_xioctl_t	*xp = ha->xioctl;
1753 
1754 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1755 
1756 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1757 	    cmd->RequestLen, mode);
1758 
1759 	if (rval == 0) {
1760 		if (reg_struct.Enable) {
1761 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1762 		} else {
1763 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1764 			/* Empty the queue. */
1765 			INTR_LOCK(ha);
1766 			xp->aen_q_head = 0;
1767 			xp->aen_q_tail = 0;
1768 			INTR_UNLOCK(ha);
1769 		}
1770 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1771 	} else {
1772 		cmd->Status = EXT_STATUS_COPY_ERR;
1773 		EL(ha, "failed, ddi_copyin\n");
1774 	}
1775 }
1776 
1777 /*
1778  * ql_aen_get
1779  *	IOCTL management server Asynchronous Event Record Transfer.
1780  *
1781  * Input:
1782  *	ha:	adapter state pointer.
1783  *	cmd:	EXT_IOCTL cmd struct pointer.
1784  *	mode:	flags.
1785  *
1786  * Returns:
1787  *	None, request status indicated in cmd->Status.
1788  *
1789  * Context:
1790  *	Kernel context.
1791  */
1792 static void
1793 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1794 {
1795 	uint32_t	out_size;
1796 	EXT_ASYNC_EVENT	*tmp_q;
1797 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1798 	uint8_t		i;
1799 	uint8_t		queue_cnt;
1800 	uint8_t		request_cnt;
1801 	ql_xioctl_t	*xp = ha->xioctl;
1802 
1803 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1804 
1805 	/* Compute the number of events that can be returned */
1806 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1807 
1808 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1809 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1810 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1811 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1812 		    "Len=%xh\n", request_cnt);
1813 		cmd->ResponseLen = 0;
1814 		return;
1815 	}
1816 
1817 	/* 1st: Make a local copy of the entire queue content. */
1818 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1819 	queue_cnt = 0;
1820 
1821 	INTR_LOCK(ha);
1822 	i = xp->aen_q_head;
1823 
1824 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1825 		if (tmp_q[i].AsyncEventCode != 0) {
1826 			bcopy(&tmp_q[i], &aen[queue_cnt],
1827 			    sizeof (EXT_ASYNC_EVENT));
1828 			queue_cnt++;
1829 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1830 		}
1831 		if (i == xp->aen_q_tail) {
1832 			/* done. */
1833 			break;
1834 		}
1835 		i++;
1836 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1837 			i = 0;
1838 		}
1839 	}
1840 
1841 	/* Empty the queue. */
1842 	xp->aen_q_head = 0;
1843 	xp->aen_q_tail = 0;
1844 
1845 	INTR_UNLOCK(ha);
1846 
1847 	/* 2nd: Now transfer the queue content to user buffer */
1848 	/* Copy the entire queue to user's buffer. */
1849 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1850 	if (queue_cnt == 0) {
1851 		cmd->ResponseLen = 0;
1852 	} else if (ddi_copyout((void *)&aen[0],
1853 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1854 	    out_size, mode) != 0) {
1855 		cmd->Status = EXT_STATUS_COPY_ERR;
1856 		cmd->ResponseLen = 0;
1857 		EL(ha, "failed, ddi_copyout\n");
1858 	} else {
1859 		cmd->ResponseLen = out_size;
1860 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1861 	}
1862 }
1863 
1864 /*
1865  * ql_enqueue_aen
1866  *
1867  * Input:
1868  *	ha:		adapter state pointer.
1869  *	event_code:	async event code of the event to add to queue.
1870  *	payload:	event payload for the queue.
1871  *	INTR_LOCK must be already obtained.
1872  *
1873  * Context:
1874  *	Interrupt or Kernel context, no mailbox commands allowed.
1875  */
1876 void
1877 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1878 {
1879 	uint8_t			new_entry;	/* index to current entry */
1880 	uint16_t		*mbx;
1881 	EXT_ASYNC_EVENT		*aen_queue;
1882 	ql_xioctl_t		*xp = ha->xioctl;
1883 
1884 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1885 	    event_code);
1886 
1887 	if (xp == NULL) {
1888 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1889 		return;
1890 	}
1891 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1892 
1893 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1894 		/* Need to change queue pointers to make room. */
1895 
1896 		/* Increment tail for adding new entry. */
1897 		xp->aen_q_tail++;
1898 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1899 			xp->aen_q_tail = 0;
1900 		}
1901 		if (xp->aen_q_head == xp->aen_q_tail) {
1902 			/*
1903 			 * We're overwriting the oldest entry, so need to
1904 			 * update the head pointer.
1905 			 */
1906 			xp->aen_q_head++;
1907 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1908 				xp->aen_q_head = 0;
1909 			}
1910 		}
1911 	}
1912 
1913 	new_entry = xp->aen_q_tail;
1914 	aen_queue[new_entry].AsyncEventCode = event_code;
1915 
1916 	/* Update payload */
1917 	if (payload != NULL) {
1918 		switch (event_code) {
1919 		case MBA_LIP_OCCURRED:
1920 		case MBA_LOOP_UP:
1921 		case MBA_LOOP_DOWN:
1922 		case MBA_LIP_F8:
1923 		case MBA_LIP_RESET:
1924 		case MBA_PORT_UPDATE:
1925 			break;
1926 		case MBA_RSCN_UPDATE:
1927 			mbx = (uint16_t *)payload;
1928 			/* al_pa */
1929 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1930 			    LSB(mbx[2]);
1931 			/* area */
1932 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1933 			    MSB(mbx[2]);
1934 			/* domain */
1935 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1936 			    LSB(mbx[1]);
1937 			/* save in big endian */
1938 			BIG_ENDIAN_24(&aen_queue[new_entry].
1939 			    Payload.RSCN.RSCNInfo[0]);
1940 
1941 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1942 			    MSB(mbx[1]);
1943 
1944 			break;
1945 		default:
1946 			/* Not supported */
1947 			EL(ha, "failed, event code not supported=%xh\n",
1948 			    event_code);
1949 			aen_queue[new_entry].AsyncEventCode = 0;
1950 			break;
1951 		}
1952 	}
1953 
1954 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1955 }
1956 
1957 /*
1958  * ql_scsi_passthru
1959  *	IOCTL SCSI passthrough.
1960  *
1961  * Input:
1962  *	ha:	adapter state pointer.
1963  *	cmd:	User space SCSI command pointer.
1964  *	mode:	flags.
1965  *
1966  * Returns:
1967  *	None, request status indicated in cmd->Status.
1968  *
1969  * Context:
1970  *	Kernel context.
1971  */
1972 static void
1973 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1974 {
1975 	ql_mbx_iocb_t		*pkt;
1976 	ql_mbx_data_t		mr;
1977 	dma_mem_t		*dma_mem;
1978 	caddr_t			pld;
1979 	uint32_t		pkt_size, pld_size;
1980 	uint16_t		qlnt, retries, cnt, cnt2;
1981 	uint8_t			*name;
1982 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1983 	EXT_SCSI_PASSTHRU	*usp_req;
1984 	int			rval;
1985 	union _passthru {
1986 		EXT_SCSI_PASSTHRU	sp_cmd;
1987 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1988 	} pt_req;		/* Passthru request */
1989 	uint32_t		status, sense_sz = 0;
1990 	ql_tgt_t		*tq = NULL;
1991 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1992 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1993 
1994 	/* SCSI request struct for SCSI passthrough IOs. */
1995 	struct {
1996 		uint16_t	lun;
1997 		uint16_t	sense_length;	/* Sense buffer size */
1998 		size_t		resid;		/* Residual */
1999 		uint8_t		*cdbp;		/* Requestor's CDB */
2000 		uint8_t		*u_sense;	/* Requestor's sense buffer */
2001 		uint8_t		cdb_len;	/* Requestor's CDB length */
2002 		uint8_t		direction;
2003 	} scsi_req;
2004 
2005 	struct {
2006 		uint8_t		*rsp_info;
2007 		uint8_t		*req_sense_data;
2008 		uint32_t	residual_length;
2009 		uint32_t	rsp_info_length;
2010 		uint32_t	req_sense_length;
2011 		uint16_t	comp_status;
2012 		uint8_t		state_flags_l;
2013 		uint8_t		state_flags_h;
2014 		uint8_t		scsi_status_l;
2015 		uint8_t		scsi_status_h;
2016 	} sts;
2017 
2018 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2019 
2020 	/* Verify Sub Code and set cnt to needed request size. */
2021 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2022 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2023 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2024 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2025 	} else {
2026 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2027 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2028 		cmd->ResponseLen = 0;
2029 		return;
2030 	}
2031 
2032 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2033 	if (dma_mem == NULL) {
2034 		EL(ha, "failed, kmem_zalloc\n");
2035 		cmd->Status = EXT_STATUS_NO_MEMORY;
2036 		cmd->ResponseLen = 0;
2037 		return;
2038 	}
2039 	/*  Verify the size of and copy in the passthru request structure. */
2040 	if (cmd->RequestLen != pld_size) {
2041 		/* Return error */
2042 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2043 		    cmd->RequestLen, pld_size);
2044 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2045 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2046 		cmd->ResponseLen = 0;
2047 		return;
2048 	}
2049 
2050 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2051 	    pld_size, mode) != 0) {
2052 		EL(ha, "failed, ddi_copyin\n");
2053 		cmd->Status = EXT_STATUS_COPY_ERR;
2054 		cmd->ResponseLen = 0;
2055 		return;
2056 	}
2057 
2058 	/*
2059 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2060 	 * request data structure.
2061 	 */
2062 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2063 		scsi_req.lun = sp_req->TargetAddr.Lun;
2064 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2065 		scsi_req.cdbp = &sp_req->Cdb[0];
2066 		scsi_req.cdb_len = sp_req->CdbLength;
2067 		scsi_req.direction = sp_req->Direction;
2068 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2069 		scsi_req.u_sense = &usp_req->SenseData[0];
2070 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2071 
2072 		qlnt = QLNT_PORT;
2073 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2074 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2075 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2076 		tq = ql_find_port(ha, name, qlnt);
2077 	} else {
2078 		/*
2079 		 * Must be FC PASSTHRU, verified above.
2080 		 */
2081 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2082 			qlnt = QLNT_PORT;
2083 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2084 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2085 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2086 			    ha->instance, cmd->SubCode, name[0], name[1],
2087 			    name[2], name[3], name[4], name[5], name[6],
2088 			    name[7]);
2089 			tq = ql_find_port(ha, name, qlnt);
2090 		} else if (fc_req->FCScsiAddr.DestType ==
2091 		    EXT_DEF_DESTTYPE_WWNN) {
2092 			qlnt = QLNT_NODE;
2093 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2094 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2095 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2096 			    ha->instance, cmd->SubCode, name[0], name[1],
2097 			    name[2], name[3], name[4], name[5], name[6],
2098 			    name[7]);
2099 			tq = ql_find_port(ha, name, qlnt);
2100 		} else if (fc_req->FCScsiAddr.DestType ==
2101 		    EXT_DEF_DESTTYPE_PORTID) {
2102 			qlnt = QLNT_PID;
2103 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2104 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2105 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2106 			    name[0], name[1], name[2]);
2107 			tq = ql_find_port(ha, name, qlnt);
2108 		} else {
2109 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2110 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2111 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2112 			cmd->ResponseLen = 0;
2113 			return;
2114 		}
2115 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2116 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2117 		scsi_req.cdbp = &sp_req->Cdb[0];
2118 		scsi_req.cdb_len = sp_req->CdbLength;
2119 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2120 		scsi_req.u_sense = &ufc_req->SenseData[0];
2121 		scsi_req.direction = fc_req->Direction;
2122 	}
2123 
2124 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2125 		EL(ha, "failed, fc_port not found\n");
2126 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2127 		cmd->ResponseLen = 0;
2128 		return;
2129 	}
2130 
2131 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2132 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2133 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2134 		cmd->ResponseLen = 0;
2135 		return;
2136 	}
2137 
2138 	/* Allocate command block. */
2139 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2140 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2141 	    cmd->ResponseLen) {
2142 		pld_size = cmd->ResponseLen;
2143 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2144 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2145 		if (pkt == NULL) {
2146 			EL(ha, "failed, kmem_zalloc\n");
2147 			cmd->Status = EXT_STATUS_NO_MEMORY;
2148 			cmd->ResponseLen = 0;
2149 			return;
2150 		}
2151 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2152 
2153 		/* Get DMA memory for the IOCB */
2154 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2155 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2156 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2157 			    "alloc failed", QL_NAME, ha->instance);
2158 			kmem_free(pkt, pkt_size);
2159 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2160 			cmd->ResponseLen = 0;
2161 			return;
2162 		}
2163 
2164 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2165 			scsi_req.direction = (uint8_t)
2166 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2167 			    CF_RD : CF_DATA_IN | CF_STAG);
2168 		} else {
2169 			scsi_req.direction = (uint8_t)
2170 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2171 			    CF_WR : CF_DATA_OUT | CF_STAG);
2172 			cmd->ResponseLen = 0;
2173 
2174 			/* Get command payload. */
2175 			if (ql_get_buffer_data(
2176 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2177 			    pld, pld_size, mode) != pld_size) {
2178 				EL(ha, "failed, get_buffer_data\n");
2179 				cmd->Status = EXT_STATUS_COPY_ERR;
2180 
2181 				kmem_free(pkt, pkt_size);
2182 				ql_free_dma_resource(ha, dma_mem);
2183 				kmem_free(dma_mem, sizeof (dma_mem_t));
2184 				return;
2185 			}
2186 
2187 			/* Copy out going data to DMA buffer. */
2188 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2189 			    (uint8_t *)dma_mem->bp, pld_size,
2190 			    DDI_DEV_AUTOINCR);
2191 
2192 			/* Sync DMA buffer. */
2193 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2194 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2195 		}
2196 	} else {
2197 		scsi_req.direction = (uint8_t)
2198 		    (CFG_IST(ha, CFG_CTRL_242581) ? 0 : CF_STAG);
2199 		cmd->ResponseLen = 0;
2200 
2201 		pkt_size = sizeof (ql_mbx_iocb_t);
2202 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2203 		if (pkt == NULL) {
2204 			EL(ha, "failed, kmem_zalloc-2\n");
2205 			cmd->Status = EXT_STATUS_NO_MEMORY;
2206 			return;
2207 		}
2208 		pld = NULL;
2209 		pld_size = 0;
2210 	}
2211 
2212 	/* retries = ha->port_down_retry_count; */
2213 	retries = 1;
2214 	cmd->Status = EXT_STATUS_OK;
2215 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2216 
2217 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2218 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2219 
2220 	do {
2221 		if (DRIVER_SUSPENDED(ha)) {
2222 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2223 			break;
2224 		}
2225 
2226 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2227 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2228 			pkt->cmd24.entry_count = 1;
2229 
2230 			/* Set LUN number */
2231 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2232 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2233 
2234 			/* Set N_port handle */
2235 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2236 
2237 			/* Set VP Index */
2238 			pkt->cmd24.vp_index = ha->vp_index;
2239 
2240 			/* Set target ID */
2241 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2242 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2243 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2244 
2245 			/* Set ISP command timeout. */
2246 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2247 
2248 			/* Load SCSI CDB */
2249 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2250 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2251 			    DDI_DEV_AUTOINCR);
2252 			for (cnt = 0; cnt < MAX_CMDSZ;
2253 			    cnt = (uint16_t)(cnt + 4)) {
2254 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2255 				    + cnt, 4);
2256 			}
2257 
2258 			/* Set tag queue control flags */
2259 			pkt->cmd24.task = TA_STAG;
2260 
2261 			if (pld_size) {
2262 				/* Set transfer direction. */
2263 				pkt->cmd24.control_flags = scsi_req.direction;
2264 
2265 				/* Set data segment count. */
2266 				pkt->cmd24.dseg_count = LE_16(1);
2267 
2268 				/* Load total byte count. */
2269 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2270 
2271 				/* Load data descriptor. */
2272 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2273 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2274 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2275 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2276 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2277 			}
2278 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2279 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2280 			pkt->cmd3.entry_count = 1;
2281 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2282 				pkt->cmd3.target_l = LSB(tq->loop_id);
2283 				pkt->cmd3.target_h = MSB(tq->loop_id);
2284 			} else {
2285 				pkt->cmd3.target_h = LSB(tq->loop_id);
2286 			}
2287 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2288 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2289 			pkt->cmd3.control_flags_l = scsi_req.direction;
2290 			pkt->cmd3.timeout = LE_16(15);
2291 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2292 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2293 			}
2294 			if (pld_size) {
2295 				pkt->cmd3.dseg_count = LE_16(1);
2296 				pkt->cmd3.byte_count = LE_32(pld_size);
2297 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2298 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2299 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2300 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2301 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2302 			}
2303 		} else {
2304 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2305 			pkt->cmd.entry_count = 1;
2306 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2307 				pkt->cmd.target_l = LSB(tq->loop_id);
2308 				pkt->cmd.target_h = MSB(tq->loop_id);
2309 			} else {
2310 				pkt->cmd.target_h = LSB(tq->loop_id);
2311 			}
2312 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2313 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2314 			pkt->cmd.control_flags_l = scsi_req.direction;
2315 			pkt->cmd.timeout = LE_16(15);
2316 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2317 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2318 			}
2319 			if (pld_size) {
2320 				pkt->cmd.dseg_count = LE_16(1);
2321 				pkt->cmd.byte_count = LE_32(pld_size);
2322 				pkt->cmd.dseg_0_address = (uint32_t)
2323 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2324 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2325 			}
2326 		}
2327 		/* Go issue command and wait for completion. */
2328 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2329 		QL_DUMP_9(pkt, 8, pkt_size);
2330 
2331 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2332 
2333 		if (pld_size) {
2334 			/* Sync in coming DMA buffer. */
2335 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2336 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2337 			/* Copy in coming DMA data. */
2338 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2339 			    (uint8_t *)dma_mem->bp, pld_size,
2340 			    DDI_DEV_AUTOINCR);
2341 		}
2342 
2343 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2344 			pkt->sts24.entry_status = (uint8_t)
2345 			    (pkt->sts24.entry_status & 0x3c);
2346 		} else {
2347 			pkt->sts.entry_status = (uint8_t)
2348 			    (pkt->sts.entry_status & 0x7e);
2349 		}
2350 
2351 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2352 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2353 			    pkt->sts.entry_status, tq->d_id.b24);
2354 			status = QL_FUNCTION_PARAMETER_ERROR;
2355 		}
2356 
2357 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2358 		    LE_16(pkt->sts24.comp_status) :
2359 		    LE_16(pkt->sts.comp_status));
2360 
2361 		/*
2362 		 * We have verified about all the request that can be so far.
2363 		 * Now we need to start verification of our ability to
2364 		 * actually issue the CDB.
2365 		 */
2366 		if (DRIVER_SUSPENDED(ha)) {
2367 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2368 			break;
2369 		} else if (status == QL_SUCCESS &&
2370 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2371 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2372 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2373 			if (tq->flags & TQF_FABRIC_DEVICE) {
2374 				rval = ql_login_fport(ha, tq, tq->loop_id,
2375 				    LFF_NO_PLOGI, &mr);
2376 				if (rval != QL_SUCCESS) {
2377 					EL(ha, "failed, login_fport=%xh, "
2378 					    "d_id=%xh\n", rval, tq->d_id.b24);
2379 				}
2380 			} else {
2381 				rval = ql_login_lport(ha, tq, tq->loop_id,
2382 				    LLF_NONE);
2383 				if (rval != QL_SUCCESS) {
2384 					EL(ha, "failed, login_lport=%xh, "
2385 					    "d_id=%xh\n", rval, tq->d_id.b24);
2386 				}
2387 			}
2388 		} else {
2389 			break;
2390 		}
2391 
2392 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2393 
2394 	} while (retries--);
2395 
2396 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2397 		/* Cannot issue command now, maybe later */
2398 		EL(ha, "failed, suspended\n");
2399 		kmem_free(pkt, pkt_size);
2400 		ql_free_dma_resource(ha, dma_mem);
2401 		kmem_free(dma_mem, sizeof (dma_mem_t));
2402 		cmd->Status = EXT_STATUS_SUSPENDED;
2403 		cmd->ResponseLen = 0;
2404 		return;
2405 	}
2406 
2407 	if (status != QL_SUCCESS) {
2408 		/* Command error */
2409 		EL(ha, "failed, I/O\n");
2410 		kmem_free(pkt, pkt_size);
2411 		ql_free_dma_resource(ha, dma_mem);
2412 		kmem_free(dma_mem, sizeof (dma_mem_t));
2413 		cmd->Status = EXT_STATUS_ERR;
2414 		cmd->DetailStatus = status;
2415 		cmd->ResponseLen = 0;
2416 		return;
2417 	}
2418 
2419 	/* Setup status. */
2420 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2421 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2422 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2423 
2424 		/* Setup residuals. */
2425 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2426 
2427 		/* Setup state flags. */
2428 		sts.state_flags_l = pkt->sts24.state_flags_l;
2429 		sts.state_flags_h = pkt->sts24.state_flags_h;
2430 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2431 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2432 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2433 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2434 		} else {
2435 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2436 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2437 			    SF_GOT_STATUS);
2438 		}
2439 		if (scsi_req.direction & CF_WR) {
2440 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2441 			    SF_DATA_OUT);
2442 		} else if (scsi_req.direction & CF_RD) {
2443 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2444 			    SF_DATA_IN);
2445 		}
2446 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2447 
2448 		/* Setup FCP response info. */
2449 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2450 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2451 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2452 		for (cnt = 0; cnt < sts.rsp_info_length;
2453 		    cnt = (uint16_t)(cnt + 4)) {
2454 			ql_chg_endian(sts.rsp_info + cnt, 4);
2455 		}
2456 
2457 		/* Setup sense data. */
2458 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2459 			sts.req_sense_length =
2460 			    LE_32(pkt->sts24.fcp_sense_length);
2461 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2462 			    SF_ARQ_DONE);
2463 		} else {
2464 			sts.req_sense_length = 0;
2465 		}
2466 		sts.req_sense_data =
2467 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2468 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2469 		    (uintptr_t)sts.req_sense_data);
2470 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2471 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2472 		}
2473 	} else {
2474 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2475 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2476 
2477 		/* Setup residuals. */
2478 		sts.residual_length = LE_32(pkt->sts.residual_length);
2479 
2480 		/* Setup state flags. */
2481 		sts.state_flags_l = pkt->sts.state_flags_l;
2482 		sts.state_flags_h = pkt->sts.state_flags_h;
2483 
2484 		/* Setup FCP response info. */
2485 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2486 		    LE_16(pkt->sts.rsp_info_length) : 0;
2487 		sts.rsp_info = &pkt->sts.rsp_info[0];
2488 
2489 		/* Setup sense data. */
2490 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2491 		    LE_16(pkt->sts.req_sense_length) : 0;
2492 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2493 	}
2494 
2495 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2496 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2497 
2498 	switch (sts.comp_status) {
2499 	case CS_INCOMPLETE:
2500 	case CS_ABORTED:
2501 	case CS_DEVICE_UNAVAILABLE:
2502 	case CS_PORT_UNAVAILABLE:
2503 	case CS_PORT_LOGGED_OUT:
2504 	case CS_PORT_CONFIG_CHG:
2505 	case CS_PORT_BUSY:
2506 	case CS_LOOP_DOWN_ABORT:
2507 		cmd->Status = EXT_STATUS_BUSY;
2508 		break;
2509 	case CS_RESET:
2510 	case CS_QUEUE_FULL:
2511 		cmd->Status = EXT_STATUS_ERR;
2512 		break;
2513 	case CS_TIMEOUT:
2514 		cmd->Status = EXT_STATUS_ERR;
2515 		break;
2516 	case CS_DATA_OVERRUN:
2517 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2518 		break;
2519 	case CS_DATA_UNDERRUN:
2520 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2521 		break;
2522 	}
2523 
2524 	/*
2525 	 * If non data transfer commands fix tranfer counts.
2526 	 */
2527 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2528 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2529 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2530 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2531 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2532 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2533 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2534 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2535 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2536 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2537 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2538 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2539 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2540 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2541 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2542 		/*
2543 		 * Non data transfer command, clear sts_entry residual
2544 		 * length.
2545 		 */
2546 		sts.residual_length = 0;
2547 		cmd->ResponseLen = 0;
2548 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2549 			sts.comp_status = CS_COMPLETE;
2550 			cmd->Status = EXT_STATUS_OK;
2551 		}
2552 	} else {
2553 		cmd->ResponseLen = pld_size;
2554 	}
2555 
2556 	/* Correct ISP completion status */
2557 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2558 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2559 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2560 		    ha->instance);
2561 		scsi_req.resid = 0;
2562 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2563 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2564 		    ha->instance);
2565 		scsi_req.resid = sts.residual_length;
2566 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2567 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2568 
2569 			cmd->ResponseLen = (uint32_t)
2570 			    (pld_size - scsi_req.resid);
2571 		} else {
2572 			EL(ha, "failed, Transfer ERROR\n");
2573 			cmd->Status = EXT_STATUS_ERR;
2574 			cmd->ResponseLen = 0;
2575 		}
2576 	} else {
2577 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2578 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2579 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2580 		    sts.scsi_status_l);
2581 
2582 		scsi_req.resid = pld_size;
2583 		/*
2584 		 * Handle residual count on SCSI check
2585 		 * condition.
2586 		 *
2587 		 * - If Residual Under / Over is set, use the
2588 		 *   Residual Transfer Length field in IOCB.
2589 		 * - If Residual Under / Over is not set, and
2590 		 *   Transferred Data bit is set in State Flags
2591 		 *   field of IOCB, report residual value of 0
2592 		 *   (you may want to do this for tape
2593 		 *   Write-type commands only). This takes care
2594 		 *   of logical end of tape problem and does
2595 		 *   not break Unit Attention.
2596 		 * - If Residual Under / Over is not set, and
2597 		 *   Transferred Data bit is not set in State
2598 		 *   Flags, report residual value equal to
2599 		 *   original data transfer length.
2600 		 */
2601 		if (sts.scsi_status_l & STATUS_CHECK) {
2602 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2603 			cmd->DetailStatus = sts.scsi_status_l;
2604 			if (sts.scsi_status_h &
2605 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2606 				scsi_req.resid = sts.residual_length;
2607 			} else if (sts.state_flags_h &
2608 			    STATE_XFERRED_DATA) {
2609 				scsi_req.resid = 0;
2610 			}
2611 		}
2612 	}
2613 
2614 	if (sts.scsi_status_l & STATUS_CHECK &&
2615 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2616 	    sts.req_sense_length) {
2617 		/*
2618 		 * Check condition with vaild sense data flag set and sense
2619 		 * length != 0
2620 		 */
2621 		if (sts.req_sense_length > scsi_req.sense_length) {
2622 			sense_sz = scsi_req.sense_length;
2623 		} else {
2624 			sense_sz = sts.req_sense_length;
2625 		}
2626 
2627 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2628 		    tq->d_id.b24);
2629 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2630 
2631 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2632 		    (size_t)sense_sz, mode) != 0) {
2633 			EL(ha, "failed, request sense ddi_copyout\n");
2634 		}
2635 
2636 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2637 		cmd->DetailStatus = sts.scsi_status_l;
2638 	}
2639 
2640 	/* Copy response payload from DMA buffer to application. */
2641 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2642 	    cmd->ResponseLen != 0) {
2643 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2644 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2645 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2646 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2647 
2648 		/* Send response payload. */
2649 		if (ql_send_buffer_data(pld,
2650 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2651 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2652 			EL(ha, "failed, send_buffer_data\n");
2653 			cmd->Status = EXT_STATUS_COPY_ERR;
2654 			cmd->ResponseLen = 0;
2655 		}
2656 	}
2657 
2658 	if (cmd->Status != EXT_STATUS_OK) {
2659 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2660 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2661 	} else {
2662 		/*EMPTY*/
2663 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2664 		    ha->instance, cmd->ResponseLen);
2665 	}
2666 
2667 	kmem_free(pkt, pkt_size);
2668 	ql_free_dma_resource(ha, dma_mem);
2669 	kmem_free(dma_mem, sizeof (dma_mem_t));
2670 }
2671 
2672 /*
2673  * ql_wwpn_to_scsiaddr
2674  *
2675  * Input:
2676  *	ha:	adapter state pointer.
2677  *	cmd:	EXT_IOCTL cmd struct pointer.
2678  *	mode:	flags.
2679  *
2680  * Context:
2681  *	Kernel context.
2682  */
2683 static void
2684 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2685 {
2686 	int		status;
2687 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2688 	EXT_SCSI_ADDR	*tmp_addr;
2689 	ql_tgt_t	*tq;
2690 
2691 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2692 
2693 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2694 		/* Return error */
2695 		EL(ha, "incorrect RequestLen\n");
2696 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2697 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2698 		return;
2699 	}
2700 
2701 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2702 	    cmd->RequestLen, mode);
2703 
2704 	if (status != 0) {
2705 		cmd->Status = EXT_STATUS_COPY_ERR;
2706 		EL(ha, "failed, ddi_copyin\n");
2707 		return;
2708 	}
2709 
2710 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2711 
2712 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2713 		/* no matching device */
2714 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2715 		EL(ha, "failed, device not found\n");
2716 		return;
2717 	}
2718 
2719 	/* Copy out the IDs found.  For now we can only return target ID. */
2720 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2721 
2722 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2723 
2724 	if (status != 0) {
2725 		cmd->Status = EXT_STATUS_COPY_ERR;
2726 		EL(ha, "failed, ddi_copyout\n");
2727 	} else {
2728 		cmd->Status = EXT_STATUS_OK;
2729 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2730 	}
2731 }
2732 
2733 /*
2734  * ql_host_idx
2735  *	Gets host order index.
2736  *
2737  * Input:
2738  *	ha:	adapter state pointer.
2739  *	cmd:	EXT_IOCTL cmd struct pointer.
2740  *	mode:	flags.
2741  *
2742  * Returns:
2743  *	None, request status indicated in cmd->Status.
2744  *
2745  * Context:
2746  *	Kernel context.
2747  */
2748 static void
2749 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2750 {
2751 	uint16_t	idx;
2752 
2753 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2754 
2755 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2756 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2757 		cmd->DetailStatus = sizeof (uint16_t);
2758 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2759 		cmd->ResponseLen = 0;
2760 		return;
2761 	}
2762 
2763 	idx = (uint16_t)ha->instance;
2764 
2765 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2766 	    sizeof (uint16_t), mode) != 0) {
2767 		cmd->Status = EXT_STATUS_COPY_ERR;
2768 		cmd->ResponseLen = 0;
2769 		EL(ha, "failed, ddi_copyout\n");
2770 	} else {
2771 		cmd->ResponseLen = sizeof (uint16_t);
2772 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2773 	}
2774 }
2775 
2776 /*
2777  * ql_host_drvname
2778  *	Gets host driver name
2779  *
2780  * Input:
2781  *	ha:	adapter state pointer.
2782  *	cmd:	EXT_IOCTL cmd struct pointer.
2783  *	mode:	flags.
2784  *
2785  * Returns:
2786  *	None, request status indicated in cmd->Status.
2787  *
2788  * Context:
2789  *	Kernel context.
2790  */
2791 static void
2792 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2793 {
2794 
2795 	char		drvname[] = QL_NAME;
2796 	uint32_t	qlnamelen;
2797 
2798 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2799 
2800 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2801 
2802 	if (cmd->ResponseLen < qlnamelen) {
2803 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2804 		cmd->DetailStatus = qlnamelen;
2805 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2806 		    cmd->ResponseLen, qlnamelen);
2807 		cmd->ResponseLen = 0;
2808 		return;
2809 	}
2810 
2811 	if (ddi_copyout((void *)&drvname,
2812 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2813 	    qlnamelen, mode) != 0) {
2814 		cmd->Status = EXT_STATUS_COPY_ERR;
2815 		cmd->ResponseLen = 0;
2816 		EL(ha, "failed, ddi_copyout\n");
2817 	} else {
2818 		cmd->ResponseLen = qlnamelen-1;
2819 	}
2820 
2821 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2822 }
2823 
2824 /*
2825  * ql_read_nvram
2826  *	Get NVRAM contents.
2827  *
2828  * Input:
2829  *	ha:	adapter state pointer.
2830  *	cmd:	EXT_IOCTL cmd struct pointer.
2831  *	mode:	flags.
2832  *
2833  * Returns:
2834  *	None, request status indicated in cmd->Status.
2835  *
2836  * Context:
2837  *	Kernel context.
2838  */
2839 static void
2840 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2841 {
2842 	uint32_t	nv_size;
2843 
2844 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2845 
2846 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2847 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2848 	if (cmd->ResponseLen < nv_size) {
2849 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2850 		cmd->DetailStatus = nv_size;
2851 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2852 		    cmd->ResponseLen);
2853 		cmd->ResponseLen = 0;
2854 		return;
2855 	}
2856 
2857 	/* Get NVRAM data. */
2858 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2859 	    mode) != 0) {
2860 		cmd->Status = EXT_STATUS_COPY_ERR;
2861 		cmd->ResponseLen = 0;
2862 		EL(ha, "failed, copy error\n");
2863 	} else {
2864 		cmd->ResponseLen = nv_size;
2865 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2866 	}
2867 }
2868 
2869 /*
2870  * ql_write_nvram
2871  *	Loads NVRAM contents.
2872  *
2873  * Input:
2874  *	ha:	adapter state pointer.
2875  *	cmd:	EXT_IOCTL cmd struct pointer.
2876  *	mode:	flags.
2877  *
2878  * Returns:
2879  *	None, request status indicated in cmd->Status.
2880  *
2881  * Context:
2882  *	Kernel context.
2883  */
2884 static void
2885 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2886 {
2887 	uint32_t	nv_size;
2888 
2889 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2890 
2891 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2892 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2893 	if (cmd->RequestLen < nv_size) {
2894 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2895 		cmd->DetailStatus = sizeof (nvram_t);
2896 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2897 		    cmd->RequestLen);
2898 		return;
2899 	}
2900 
2901 	/* Load NVRAM data. */
2902 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2903 	    mode) != 0) {
2904 		cmd->Status = EXT_STATUS_COPY_ERR;
2905 		EL(ha, "failed, copy error\n");
2906 	} else {
2907 		/*EMPTY*/
2908 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2909 	}
2910 }
2911 
2912 /*
2913  * ql_write_vpd
2914  *	Loads VPD contents.
2915  *
2916  * Input:
2917  *	ha:	adapter state pointer.
2918  *	cmd:	EXT_IOCTL cmd struct pointer.
2919  *	mode:	flags.
2920  *
2921  * Returns:
2922  *	None, request status indicated in cmd->Status.
2923  *
2924  * Context:
2925  *	Kernel context.
2926  */
2927 static void
2928 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2929 {
2930 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2931 
2932 	int32_t		rval = 0;
2933 
2934 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2935 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2936 		EL(ha, "failed, invalid request for HBA\n");
2937 		return;
2938 	}
2939 
2940 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2941 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2942 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2943 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2944 		    cmd->RequestLen);
2945 		return;
2946 	}
2947 
2948 	/* Load VPD data. */
2949 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2950 	    mode)) != 0) {
2951 		cmd->Status = EXT_STATUS_COPY_ERR;
2952 		cmd->DetailStatus = rval;
2953 		EL(ha, "failed, errno=%x\n", rval);
2954 	} else {
2955 		/*EMPTY*/
2956 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2957 	}
2958 }
2959 
2960 /*
2961  * ql_read_vpd
2962  *	Dumps VPD contents.
2963  *
2964  * Input:
2965  *	ha:	adapter state pointer.
2966  *	cmd:	EXT_IOCTL cmd struct pointer.
2967  *	mode:	flags.
2968  *
2969  * Returns:
2970  *	None, request status indicated in cmd->Status.
2971  *
2972  * Context:
2973  *	Kernel context.
2974  */
2975 static void
2976 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2977 {
2978 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2979 
2980 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2981 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2982 		EL(ha, "failed, invalid request for HBA\n");
2983 		return;
2984 	}
2985 
2986 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2987 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2988 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2989 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2990 		    cmd->ResponseLen);
2991 		return;
2992 	}
2993 
2994 	/* Dump VPD data. */
2995 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2996 	    mode)) != 0) {
2997 		cmd->Status = EXT_STATUS_COPY_ERR;
2998 		EL(ha, "failed,\n");
2999 	} else {
3000 		/*EMPTY*/
3001 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3002 	}
3003 }
3004 
3005 /*
3006  * ql_get_fcache
3007  *	Dumps flash cache contents.
3008  *
3009  * Input:
3010  *	ha:	adapter state pointer.
3011  *	cmd:	EXT_IOCTL cmd struct pointer.
3012  *	mode:	flags.
3013  *
3014  * Returns:
3015  *	None, request status indicated in cmd->Status.
3016  *
3017  * Context:
3018  *	Kernel context.
3019  */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Hold the cache lock for the whole walk of ha->fcache. */
	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum required user buffer: 100 bytes (a single legacy image
	 * slot) on older HBAs; 400 bytes on 24xx/25xx/81xx parts (three
	 * 100-byte slots plus the firmware slot at offset 300 -- see the
	 * fixed offsets used below).
	 */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/* boff: user-buffer offset; bsize now counts bytes copied out. */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/*
		 * Get the next image.  NOTE(review): ql_get_fbuf presumably
		 * returns the first cached entry matching one of the
		 * remaining types in the mask -- confirm against its
		 * definition.  A NULL return terminates the loop.
		 */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* Legacy layout: each image slot is 100 bytes. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, done\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance by the full slot even on a short image. */
			boff += 100;
			bsize += cpsize;
			/* Clear this type so each type is copied only once. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		/* Bytes to skip: the PCI header + PCI data structure. */
		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* At most 100 bytes of firmware after the stripped header. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, done\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): the full slot size (100) is added here even
		 * when cpsize is smaller -- matches the slot-based layout,
		 * but means DetailStatus reports slot bytes, not exact
		 * firmware bytes, for the final slot.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* DetailStatus reports the number of bytes placed in the buffer. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3119 
3120 /*
3121  * ql_get_fcache_ex
3122  *	Dumps flash cache contents.
3123  *
3124  * Input:
3125  *	ha:	adapter state pointer.
3126  *	cmd:	EXT_IOCTL cmd struct pointer.
3127  *	mode:	flags.
3128  *
3129  * Returns:
3130  *	None, request status indicated in cmd->Status.
3131  *
3132  * Context:
3133  *	Kernel context.
3134  */
3135 static void
3136 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3137 {
3138 	uint32_t	bsize = 0;
3139 	uint32_t	boff = 0;
3140 	ql_fcache_t	*fptr;
3141 
3142 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3143 
3144 	CACHE_LOCK(ha);
3145 	if (ha->fcache == NULL) {
3146 		CACHE_UNLOCK(ha);
3147 		cmd->Status = EXT_STATUS_ERR;
3148 		EL(ha, "failed, adapter fcache not setup\n");
3149 		return;
3150 	}
3151 
3152 	/* Make sure user passed enough buffer space */
3153 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3154 		bsize += FBUFSIZE;
3155 	}
3156 
3157 	if (cmd->ResponseLen < bsize) {
3158 		CACHE_UNLOCK(ha);
3159 		if (cmd->ResponseLen != 0) {
3160 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3161 			    bsize, cmd->ResponseLen);
3162 		}
3163 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3164 		cmd->DetailStatus = bsize;
3165 		return;
3166 	}
3167 
3168 	boff = 0;
3169 	fptr = ha->fcache;
3170 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3171 		/* Get the next image */
3172 		if (ddi_copyout(fptr->buf,
3173 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3174 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3175 		    mode) != 0) {
3176 			CACHE_UNLOCK(ha);
3177 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3178 			cmd->Status = EXT_STATUS_COPY_ERR;
3179 			cmd->DetailStatus = 0;
3180 			return;
3181 		}
3182 		boff += FBUFSIZE;
3183 		fptr = fptr->next;
3184 	}
3185 
3186 	CACHE_UNLOCK(ha);
3187 	cmd->Status = EXT_STATUS_OK;
3188 	cmd->DetailStatus = bsize;
3189 
3190 	QL_PRINT_9(CE_CONT, "(%d): done\n",