1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
99 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
100 static int ql_load_fcode(ql_adapter_state_t *, uint8_t *, uint32_t);
101 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
103     uint8_t);
104 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
105 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
106 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
107 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
113 static uint32_t ql_setup_led(ql_adapter_state_t *);
114 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
115 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
119 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
120 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
123 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
124 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
127 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
128 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
129 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
134 static void ql_restart_hba(ql_adapter_state_t *);
135 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
138 
139 /* ******************************************************************** */
140 /*			External IOCTL support.				*/
141 /* ******************************************************************** */
142 
143 /*
144  * ql_alloc_xioctl_resource
145  *	Allocates resources needed by module code.
146  *
147  * Input:
148  *	ha:		adapter state pointer.
149  *
150  * Returns:
151  *	SYS_ERRNO
152  *
153  * Context:
154  *	Kernel context.
155  */
156 int
157 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
158 {
159 	ql_xioctl_t	*xp;
160 
161 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
162 
163 	if (ha->xioctl != NULL) {
164 		QL_PRINT_9(CE_CONT, "(%d): already allocated exiting\n",
165 		    ha->instance);
166 		return (0);
167 	}
168 
169 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
170 	if (xp == NULL) {
171 		EL(ha, "failed, kmem_zalloc\n");
172 		return (ENOMEM);
173 	}
174 	ha->xioctl = xp;
175 
176 	/* Allocate AEN tracking buffer */
177 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
178 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
179 	if (xp->aen_tracking_queue == NULL) {
180 		EL(ha, "failed, kmem_zalloc-2\n");
181 		ql_free_xioctl_resource(ha);
182 		return (ENOMEM);
183 	}
184 
185 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
186 
187 	return (0);
188 }
189 
190 /*
191  * ql_free_xioctl_resource
192  *	Frees resources used by module code.
193  *
194  * Input:
195  *	ha:		adapter state pointer.
196  *
197  * Context:
198  *	Kernel context.
199  */
200 void
201 ql_free_xioctl_resource(ql_adapter_state_t *ha)
202 {
203 	ql_xioctl_t	*xp = ha->xioctl;
204 
205 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
206 
207 	if (xp == NULL) {
208 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
209 		return;
210 	}
211 
212 	if (xp->aen_tracking_queue != NULL) {
213 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
214 		    sizeof (EXT_ASYNC_EVENT));
215 		xp->aen_tracking_queue = NULL;
216 	}
217 
218 	kmem_free(xp, sizeof (ql_xioctl_t));
219 	ha->xioctl = NULL;
220 
221 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
222 }
223 
224 /*
225  * ql_xioctl
226  *	External IOCTL processing.
227  *
228  * Input:
229  *	ha:	adapter state pointer.
230  *	cmd:	function to perform
231  *	arg:	data type varies with request
232  *	mode:	flags
233  *	cred_p:	credentials pointer
234  *	rval_p:	pointer to result value
235  *
236  * Returns:
237  *	0:		success
238  *	ENXIO:		No such device or address
239  *	ENOPROTOOPT:	Protocol not available
240  *
241  * Context:
242  *	Kernel context.
243  */
244 /* ARGSUSED */
245 int
246 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
247     cred_t *cred_p, int *rval_p)
248 {
249 	int	rval;
250 
251 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance, cmd);
252 
253 	if (ha->xioctl == NULL) {
254 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
255 		return (ENXIO);
256 	}
257 
258 	switch (cmd) {
259 	case EXT_CC_QUERY:
260 	case EXT_CC_SEND_FCCT_PASSTHRU:
261 	case EXT_CC_REG_AEN:
262 	case EXT_CC_GET_AEN:
263 	case EXT_CC_SEND_SCSI_PASSTHRU:
264 	case EXT_CC_WWPN_TO_SCSIADDR:
265 	case EXT_CC_SEND_ELS_RNID:
266 	case EXT_CC_SET_DATA:
267 	case EXT_CC_GET_DATA:
268 	case EXT_CC_HOST_IDX:
269 	case EXT_CC_READ_NVRAM:
270 	case EXT_CC_UPDATE_NVRAM:
271 	case EXT_CC_READ_OPTION_ROM:
272 	case EXT_CC_READ_OPTION_ROM_EX:
273 	case EXT_CC_UPDATE_OPTION_ROM:
274 	case EXT_CC_UPDATE_OPTION_ROM_EX:
275 	case EXT_CC_GET_VPD:
276 	case EXT_CC_SET_VPD:
277 	case EXT_CC_LOOPBACK:
278 	case EXT_CC_GET_FCACHE:
279 	case EXT_CC_GET_FCACHE_EX:
280 	case EXT_CC_HOST_DRVNAME:
281 	case EXT_CC_GET_SFP_DATA:
282 	case EXT_CC_PORT_PARAM:
283 	case EXT_CC_GET_PCI_DATA:
284 	case EXT_CC_GET_FWEXTTRACE:
285 	case EXT_CC_GET_FWFCETRACE:
286 	case EXT_CC_GET_VP_CNT_ID:
287 	case EXT_CC_VPORT_CMD:
288 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
289 		break;
290 	default:
291 		/* function not supported. */
292 		EL(ha, "function=%d not supported\n", cmd);
293 		rval = ENOPROTOOPT;
294 	}
295 
296 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
297 
298 	return (rval);
299 }
300 
301 /*
302  * ql_sdm_ioctl
303  *	Provides ioctl functions for SAN/Device Management functions
304  *	AKA External Ioctl functions.
305  *
306  * Input:
307  *	ha:		adapter state pointer.
308  *	ioctl_code:	ioctl function to perform
309  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
310  *	mode:		flags
311  *
312  * Returns:
313  *	0:	success
314  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
315  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
316  *		copyout of EXT_IOCTL status info failed.
317  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
318  *	EBUSY:	Device busy
319  *
320  * Context:
321  *	Kernel context.
322  */
323 static int
324 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
325 {
326 	EXT_IOCTL		*cmd;
327 	int			rval;
328 	ql_adapter_state_t	*vha;
329 
330 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
331 
332 	/* Copy argument structure (EXT_IOCTL) from application land. */
333 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
334 	    ql_validate_signature)) != 0) {
335 		/*
336 		 * a non-zero value at this time means a problem getting
337 		 * the requested information from application land, just
338 		 * return the error code and hope for the best.
339 		 */
340 		EL(ha, "failed, sdm_setup\n");
341 		return (rval);
342 	}
343 
344 	/*
345 	 * Map the physical ha ptr (which the ioctl is called with)
346 	 * to the virtual ha that the caller is addressing.
347 	 */
348 	if (ha->flags & VP_ENABLED) {
349 		/*
350 		 * Special case: HbaSelect == 0 is physical ha
351 		 */
352 		if (cmd->HbaSelect != 0) {
353 			vha = ha->vp_next;
354 			while (vha != NULL) {
355 				if (vha->vp_index == cmd->HbaSelect) {
356 					ha = vha;
357 					break;
358 				}
359 				vha = vha->vp_next;
360 			}
361 
362 			/*
363 			 * If we can't find the specified vp index then
364 			 * we probably have an error (vp indexes shifting
365 			 * under our feet?).
366 			 */
367 			if (vha == NULL) {
368 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
369 				    cmd->HbaSelect);
370 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
371 				cmd->ResponseLen = 0;
372 				return (EFAULT);
373 			}
374 		}
375 	}
376 
377 	/*
378 	 * If driver is suspended, stalled, or powered down rtn BUSY
379 	 */
380 	if (ha->flags & ADAPTER_SUSPENDED ||
381 	    ha->task_daemon_flags & DRIVER_STALL ||
382 	    ha->power_level != PM_LEVEL_D0) {
383 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
384 		    "driver suspended" :
385 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
386 		    "FCA powered down"));
387 		cmd->Status = EXT_STATUS_BUSY;
388 		cmd->ResponseLen = 0;
389 		rval = EBUSY;
390 
391 		/* Return results to caller */
392 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
393 			EL(ha, "failed, sdm_return\n");
394 			rval = EFAULT;
395 		}
396 		return (rval);
397 	}
398 
399 	switch (ioctl_code) {
400 	case EXT_CC_QUERY_OS:
401 		ql_query(ha, cmd, mode);
402 		break;
403 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
404 		ql_fcct(ha, cmd, mode);
405 		break;
406 	case EXT_CC_REG_AEN_OS:
407 		ql_aen_reg(ha, cmd, mode);
408 		break;
409 	case EXT_CC_GET_AEN_OS:
410 		ql_aen_get(ha, cmd, mode);
411 		break;
412 	case EXT_CC_GET_DATA_OS:
413 		ql_get_host_data(ha, cmd, mode);
414 		break;
415 	case EXT_CC_SET_DATA_OS:
416 		ql_set_host_data(ha, cmd, mode);
417 		break;
418 	case EXT_CC_SEND_ELS_RNID_OS:
419 		ql_send_els_rnid(ha, cmd, mode);
420 		break;
421 	case EXT_CC_SCSI_PASSTHRU_OS:
422 		ql_scsi_passthru(ha, cmd, mode);
423 		break;
424 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
425 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
426 		break;
427 	case EXT_CC_HOST_IDX_OS:
428 		ql_host_idx(ha, cmd, mode);
429 		break;
430 	case EXT_CC_HOST_DRVNAME_OS:
431 		ql_host_drvname(ha, cmd, mode);
432 		break;
433 	case EXT_CC_READ_NVRAM_OS:
434 		ql_read_nvram(ha, cmd, mode);
435 		break;
436 	case EXT_CC_UPDATE_NVRAM_OS:
437 		ql_write_nvram(ha, cmd, mode);
438 		break;
439 	case EXT_CC_READ_OPTION_ROM_OS:
440 	case EXT_CC_READ_OPTION_ROM_EX_OS:
441 		ql_read_flash(ha, cmd, mode);
442 		break;
443 	case EXT_CC_UPDATE_OPTION_ROM_OS:
444 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
445 		ql_write_flash(ha, cmd, mode);
446 		break;
447 	case EXT_CC_LOOPBACK_OS:
448 		ql_diagnostic_loopback(ha, cmd, mode);
449 		break;
450 	case EXT_CC_GET_VPD_OS:
451 		ql_read_vpd(ha, cmd, mode);
452 		break;
453 	case EXT_CC_SET_VPD_OS:
454 		ql_write_vpd(ha, cmd, mode);
455 		break;
456 	case EXT_CC_GET_FCACHE_OS:
457 		ql_get_fcache(ha, cmd, mode);
458 		break;
459 	case EXT_CC_GET_FCACHE_EX_OS:
460 		ql_get_fcache_ex(ha, cmd, mode);
461 		break;
462 	case EXT_CC_GET_SFP_DATA_OS:
463 		ql_get_sfp(ha, cmd, mode);
464 		break;
465 	case EXT_CC_PORT_PARAM_OS:
466 		ql_port_param(ha, cmd, mode);
467 		break;
468 	case EXT_CC_GET_PCI_DATA_OS:
469 		ql_get_pci_data(ha, cmd, mode);
470 		break;
471 	case EXT_CC_GET_FWEXTTRACE_OS:
472 		ql_get_fwexttrace(ha, cmd, mode);
473 		break;
474 	case EXT_CC_GET_FWFCETRACE_OS:
475 		ql_get_fwfcetrace(ha, cmd, mode);
476 		break;
477 	case EXT_CC_MENLO_RESET:
478 		ql_menlo_reset(ha, cmd, mode);
479 		break;
480 	case EXT_CC_MENLO_GET_FW_VERSION:
481 		ql_menlo_get_fw_version(ha, cmd, mode);
482 		break;
483 	case EXT_CC_MENLO_UPDATE_FW:
484 		ql_menlo_update_fw(ha, cmd, mode);
485 		break;
486 	case EXT_CC_MENLO_MANAGE_INFO:
487 		ql_menlo_manage_info(ha, cmd, mode);
488 		break;
489 	case EXT_CC_GET_VP_CNT_ID_OS:
490 		ql_get_vp_cnt_id(ha, cmd, mode);
491 		break;
492 	case EXT_CC_VPORT_CMD_OS:
493 		ql_vp_ioctl(ha, cmd, mode);
494 		break;
495 	default:
496 		/* function not supported. */
497 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
498 
499 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
500 		cmd->ResponseLen = 0;
501 		break;
502 	}
503 
504 	/* Return results to caller */
505 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
506 		EL(ha, "failed, sdm_return\n");
507 		return (EFAULT);
508 	}
509 
510 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
511 
512 	return (0);
513 }
514 
515 /*
516  * ql_sdm_setup
517  *	Make a local copy of the EXT_IOCTL struct and validate it.
518  *
519  * Input:
520  *	ha:		adapter state pointer.
521  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
522  *	arg:		Address of application EXT_IOCTL cmd data
523  *	mode:		flags
524  *	val_sig:	Pointer to a function to validate the ioctl signature.
525  *
526  * Returns:
527  *	0:		success
528  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
529  *	EINVAL:		Invalid version, signature.
530  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
531  *
532  * Context:
533  *	Kernel context.
534  */
535 static int
536 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
537     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
538 {
539 	int		rval;
540 	EXT_IOCTL	*cmd;
541 
542 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
543 
544 	/* Allocate local memory for EXT_IOCTL. */
545 	*cmd_struct = NULL;
546 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
547 	if (cmd == NULL) {
548 		EL(ha, "failed, kmem_zalloc\n");
549 		return (ENOMEM);
550 	}
551 	/* Get argument structure. */
552 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
553 	if (rval != 0) {
554 		EL(ha, "failed, ddi_copyin\n");
555 		rval = EFAULT;
556 	} else {
557 		/*
558 		 * Check signature and the version.
559 		 * If either are not valid then neither is the
560 		 * structure so don't attempt to return any error status
561 		 * because we can't trust what caller's arg points to.
562 		 * Just return the errno.
563 		 */
564 		if (val_sig(cmd) == 0) {
565 			EL(ha, "failed, signature\n");
566 			rval = EINVAL;
567 		} else if (cmd->Version > EXT_VERSION) {
568 			EL(ha, "failed, version\n");
569 			rval = EINVAL;
570 		}
571 	}
572 
573 	if (rval == 0) {
574 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
575 		*cmd_struct = cmd;
576 		cmd->Status = EXT_STATUS_OK;
577 		cmd->DetailStatus = 0;
578 	} else {
579 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
580 	}
581 
582 	return (rval);
583 }
584 
585 /*
586  * ql_validate_signature
587  *	Validate the signature string for an external ioctl call.
588  *
589  * Input:
590  *	sg:	Pointer to EXT_IOCTL signature to validate.
591  *
592  * Returns:
593  *	B_TRUE:		Signature is valid.
594  *	B_FALSE:	Signature is NOT valid.
595  *
596  * Context:
597  *	Kernel context.
598  */
599 static boolean_t
600 ql_validate_signature(EXT_IOCTL *cmd_struct)
601 {
602 	/*
603 	 * Check signature.
604 	 *
605 	 * If signature is not valid then neither is the rest of
606 	 * the structure (e.g., can't trust it), so don't attempt
607 	 * to return any error status other than the errno.
608 	 */
609 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
610 		QL_PRINT_2(CE_CONT, "failed,\n");
611 		return (B_FALSE);
612 	}
613 
614 	return (B_TRUE);
615 }
616 
617 /*
618  * ql_sdm_return
619  *	Copies return data/status to application land for
620  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
621  *
622  * Input:
623  *	ha:		adapter state pointer.
624  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
625  *	ioctl_code:	ioctl function to perform
626  *	arg:		EXT_IOCTL cmd data in application land.
627  *	mode:		flags
628  *
629  * Returns:
630  *	0:	success
631  *	EFAULT:	Copy out error.
632  *
633  * Context:
634  *	Kernel context.
635  */
636 /* ARGSUSED */
637 static int
638 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
639 {
640 	int	rval = 0;
641 
642 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
643 
644 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
645 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
646 	    mode);
647 
648 	rval |= ddi_copyout((void *)&cmd->Status,
649 	    (void *)&(((EXT_IOCTL*)arg)->Status),
650 	    sizeof (cmd->Status), mode);
651 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
652 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
653 	    sizeof (cmd->DetailStatus), mode);
654 
655 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
656 
657 	if (rval != 0) {
658 		/* Some copyout operation failed */
659 		EL(ha, "failed, ddi_copyout\n");
660 		return (EFAULT);
661 	}
662 
663 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
664 
665 	return (0);
666 }
667 
668 /*
669  * ql_query
670  *	Performs all EXT_CC_QUERY functions.
671  *
672  * Input:
673  *	ha:	adapter state pointer.
674  *	cmd:	Local EXT_IOCTL cmd struct pointer.
675  *	mode:	flags.
676  *
677  * Returns:
678  *	None, request status indicated in cmd->Status.
679  *
680  * Context:
681  *	Kernel context.
682  */
683 static void
684 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
685 {
686 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance,
687 	    cmd->SubCode);
688 
689 	/* case off on command subcode */
690 	switch (cmd->SubCode) {
691 	case EXT_SC_QUERY_HBA_NODE:
692 		ql_qry_hba_node(ha, cmd, mode);
693 		break;
694 	case EXT_SC_QUERY_HBA_PORT:
695 		ql_qry_hba_port(ha, cmd, mode);
696 		break;
697 	case EXT_SC_QUERY_DISC_PORT:
698 		ql_qry_disc_port(ha, cmd, mode);
699 		break;
700 	case EXT_SC_QUERY_DISC_TGT:
701 		ql_qry_disc_tgt(ha, cmd, mode);
702 		break;
703 	case EXT_SC_QUERY_DRIVER:
704 		ql_qry_driver(ha, cmd, mode);
705 		break;
706 	case EXT_SC_QUERY_FW:
707 		ql_qry_fw(ha, cmd, mode);
708 		break;
709 	case EXT_SC_QUERY_CHIP:
710 		ql_qry_chip(ha, cmd, mode);
711 		break;
712 	case EXT_SC_QUERY_DISC_LUN:
713 	default:
714 		/* function not supported. */
715 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
716 		EL(ha, "failed, Unsupported Subcode=%xh\n",
717 		    cmd->SubCode);
718 		break;
719 	}
720 
721 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
722 }
723 
724 /*
725  * ql_qry_hba_node
726  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
727  *
728  * Input:
729  *	ha:	adapter state pointer.
730  *	cmd:	EXT_IOCTL cmd struct pointer.
731  *	mode:	flags.
732  *
733  * Returns:
734  *	None, request status indicated in cmd->Status.
735  *
736  * Context:
737  *	Kernel context.
738  */
739 static void
740 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
741 {
742 	EXT_HBA_NODE	tmp_node = {0};
743 	uint_t		len;
744 	caddr_t		bufp;
745 	ql_mbx_data_t	mr;
746 
747 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
748 
749 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
750 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
751 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
752 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
753 		    "Len=%xh\n", cmd->ResponseLen);
754 		cmd->ResponseLen = 0;
755 		return;
756 	}
757 
758 	/* fill in the values */
759 
760 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
761 	    EXT_DEF_WWN_NAME_SIZE);
762 
763 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
764 
765 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
766 
767 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
768 
769 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
770 
771 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
772 		size_t		verlen;
773 		uint16_t	w;
774 		char		*tmpptr;
775 
776 		verlen = strlen((char *)(tmp_node.DriverVersion));
777 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
778 			EL(ha, "failed, No room for fpga version string\n");
779 		} else {
780 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
781 			    (uint16_t *)
782 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
783 
784 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
785 			if (tmpptr == NULL) {
786 				EL(ha, "Unable to insert fpga version str\n");
787 			} else {
788 				(void) sprintf(tmpptr, "%d.%d",
789 				    ((w & 0xf0) >> 4), (w & 0x0f));
790 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
791 			}
792 		}
793 	}
794 	(void) ql_get_fw_version(ha, &mr);
795 
796 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
797 	    mr.mb[1], mr.mb[2], mr.mb[3]);
798 
799 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
800 		switch (mr.mb[6]) {
801 		case FWATTRIB_EF:
802 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
803 			break;
804 		case FWATTRIB_TP:
805 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
806 			break;
807 		case FWATTRIB_IP:
808 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
809 			break;
810 		case FWATTRIB_IPX:
811 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
812 			break;
813 		case FWATTRIB_FL:
814 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
815 			break;
816 		case FWATTRIB_FPX:
817 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
818 			break;
819 		default:
820 			break;
821 		}
822 	}
823 
824 	/* FCode version. */
825 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
826 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
827 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
828 	    (int *)&len) == DDI_PROP_SUCCESS) {
829 		if (len < EXT_DEF_MAX_STR_SIZE) {
830 			bcopy(bufp, tmp_node.OptRomVersion, len);
831 		} else {
832 			bcopy(bufp, tmp_node.OptRomVersion,
833 			    EXT_DEF_MAX_STR_SIZE - 1);
834 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
835 			    '\0';
836 		}
837 		kmem_free(bufp, len);
838 	} else {
839 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
840 	}
841 	tmp_node.PortCount = 1;
842 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
843 
844 	if (ddi_copyout((void *)&tmp_node,
845 	    (void *)(uintptr_t)(cmd->ResponseAdr),
846 	    sizeof (EXT_HBA_NODE), mode) != 0) {
847 		cmd->Status = EXT_STATUS_COPY_ERR;
848 		cmd->ResponseLen = 0;
849 		EL(ha, "failed, ddi_copyout\n");
850 	} else {
851 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
852 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
853 	}
854 }
855 
856 /*
857  * ql_qry_hba_port
858  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
859  *
860  * Input:
861  *	ha:	adapter state pointer.
862  *	cmd:	EXT_IOCTL cmd struct pointer.
863  *	mode:	flags.
864  *
865  * Returns:
866  *	None, request status indicated in cmd->Status.
867  *
868  * Context:
869  *	Kernel context.
870  */
871 static void
872 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
873 {
874 	ql_link_t	*link;
875 	ql_tgt_t	*tq;
876 	ql_mbx_data_t	mr;
877 	EXT_HBA_PORT	tmp_port = {0};
878 	int		rval;
879 	uint16_t	port_cnt, tgt_cnt, index;
880 
881 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
882 
883 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
884 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
885 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
886 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
887 		    cmd->ResponseLen);
888 		cmd->ResponseLen = 0;
889 		return;
890 	}
891 
892 	/* fill in the values */
893 
894 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
895 	    EXT_DEF_WWN_NAME_SIZE);
896 	tmp_port.Id[0] = 0;
897 	tmp_port.Id[1] = ha->d_id.b.domain;
898 	tmp_port.Id[2] = ha->d_id.b.area;
899 	tmp_port.Id[3] = ha->d_id.b.al_pa;
900 
901 	/* For now we are initiator only driver */
902 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
903 
904 	if (ha->task_daemon_flags & LOOP_DOWN) {
905 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
906 	} else if (DRIVER_SUSPENDED(ha)) {
907 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
908 	} else {
909 		tmp_port.State = EXT_DEF_HBA_OK;
910 	}
911 
912 	if (ha->flags & POINT_TO_POINT) {
913 		tmp_port.Mode = EXT_DEF_P2P_MODE;
914 	} else {
915 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
916 	}
917 	/*
918 	 * fill in the portspeed values.
919 	 *
920 	 * default to not yet negotiated state
921 	 */
922 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
923 
924 	if (tmp_port.State == EXT_DEF_HBA_OK) {
925 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
926 			mr.mb[1] = 0;
927 			mr.mb[2] = 0;
928 			rval = ql_data_rate(ha, &mr);
929 			if (rval != QL_SUCCESS) {
930 				EL(ha, "failed, data_rate=%xh\n", rval);
931 			} else {
932 				switch (mr.mb[1]) {
933 				case 0:
934 					tmp_port.PortSpeed =
935 					    EXT_DEF_PORTSPEED_1GBIT;
936 					break;
937 				case 1:
938 					tmp_port.PortSpeed =
939 					    EXT_DEF_PORTSPEED_2GBIT;
940 					break;
941 				case 3:
942 					tmp_port.PortSpeed =
943 					    EXT_DEF_PORTSPEED_4GBIT;
944 					break;
945 				case 4:
946 					tmp_port.PortSpeed =
947 					    EXT_DEF_PORTSPEED_8GBIT;
948 					break;
949 				default:
950 					tmp_port.PortSpeed =
951 					    EXT_DEF_PORTSPEED_UNKNOWN;
952 					EL(ha, "failed, data rate=%xh\n",
953 					    mr.mb[1]);
954 					break;
955 				}
956 			}
957 		} else {
958 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
959 		}
960 	}
961 
962 	/* Report all supported port speeds */
963 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
964 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
965 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
966 		    EXT_DEF_PORTSPEED_1GBIT);
967 		/*
968 		 * Correct supported speeds based on type of
969 		 * sfp that is present
970 		 */
971 		switch (ha->sfp_stat) {
972 		case 1:
973 			/* no sfp detected */
974 			break;
975 		case 2:
976 		case 4:
977 			/* 4GB sfp */
978 			tmp_port.PortSupportedSpeed &=
979 			    ~EXT_DEF_PORTSPEED_8GBIT;
980 			break;
981 		case 3:
982 		case 5:
983 			/* 8GB sfp */
984 			tmp_port.PortSupportedSpeed &=
985 			    ~EXT_DEF_PORTSPEED_1GBIT;
986 			break;
987 		default:
988 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
989 			break;
990 
991 		}
992 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
993 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
994 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
995 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
996 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
997 		    EXT_DEF_PORTSPEED_1GBIT);
998 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
999 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1000 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1001 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1002 	} else {
1003 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1004 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1005 	}
1006 	tmp_port.sfp_status = LSB(ha->sfp_stat);
1007 	port_cnt = 0;
1008 	tgt_cnt = 0;
1009 
1010 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1011 		for (link = ha->dev[index].first; link != NULL;
1012 		    link = link->next) {
1013 			tq = link->base_address;
1014 
1015 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1016 				continue;
1017 			}
1018 
1019 			port_cnt++;
1020 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1021 				tgt_cnt++;
1022 			}
1023 		}
1024 	}
1025 
1026 	tmp_port.DiscPortCount = port_cnt;
1027 	tmp_port.DiscTargetCount = tgt_cnt;
1028 
1029 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1030 
1031 	rval = ddi_copyout((void *)&tmp_port,
1032 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1033 	    sizeof (EXT_HBA_PORT), mode);
1034 	if (rval != 0) {
1035 		cmd->Status = EXT_STATUS_COPY_ERR;
1036 		cmd->ResponseLen = 0;
1037 		EL(ha, "failed, ddi_copyout\n");
1038 	} else {
1039 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1040 		QL_PRINT_9(CE_CONT, "(%d): exiting, ports=%d, targets=%d\n",
1041 		    ha->instance, port_cnt, tgt_cnt);
1042 	}
1043 }
1044 
1045 /*
1046  * ql_qry_disc_port
1047  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1048  *
1049  * Input:
1050  *	ha:	adapter state pointer.
1051  *	cmd:	EXT_IOCTL cmd struct pointer.
1052  *	mode:	flags.
1053  *
1054  *	cmd->Instance = Port instance in fcport chain.
1055  *
1056  * Returns:
1057  *	None, request status indicated in cmd->Status.
1058  *
1059  * Context:
1060  *	Kernel context.
1061  */
1062 static void
1063 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1064 {
1065 	EXT_DISC_PORT	tmp_port = {0};
1066 	ql_link_t	*link;
1067 	ql_tgt_t	*tq;
1068 	uint16_t	index;
1069 	uint16_t	inst = 0;
1070 
1071 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1072 
1073 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1074 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1075 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1076 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1077 		    cmd->ResponseLen);
1078 		cmd->ResponseLen = 0;
1079 		return;
1080 	}
1081 
1082 	for (link = NULL, index = 0;
1083 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1084 		for (link = ha->dev[index].first; link != NULL;
1085 		    link = link->next) {
1086 			tq = link->base_address;
1087 
1088 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1089 				continue;
1090 			}
1091 			if (inst != cmd->Instance) {
1092 				inst++;
1093 				continue;
1094 			}
1095 
1096 			/* fill in the values */
1097 			bcopy(tq->node_name, tmp_port.WWNN,
1098 			    EXT_DEF_WWN_NAME_SIZE);
1099 			bcopy(tq->port_name, tmp_port.WWPN,
1100 			    EXT_DEF_WWN_NAME_SIZE);
1101 
1102 			break;
1103 		}
1104 	}
1105 
1106 	if (link == NULL) {
1107 		/* no matching device */
1108 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1109 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1110 		cmd->ResponseLen = 0;
1111 		return;
1112 	}
1113 
1114 	tmp_port.Id[0] = 0;
1115 	tmp_port.Id[1] = tq->d_id.b.domain;
1116 	tmp_port.Id[2] = tq->d_id.b.area;
1117 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1118 
1119 	tmp_port.Type = 0;
1120 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1121 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1122 		    EXT_DEF_INITIATOR_DEV);
1123 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1124 		(void) ql_inq_scan(ha, tq, 1);
1125 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1126 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1127 	}
1128 
1129 	if (tq->flags & TQF_FABRIC_DEVICE) {
1130 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1131 	} else {
1132 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1133 	}
1134 
1135 	tmp_port.Status = 0;
1136 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1137 
1138 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1139 
1140 	if (ddi_copyout((void *)&tmp_port,
1141 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1142 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1143 		cmd->Status = EXT_STATUS_COPY_ERR;
1144 		cmd->ResponseLen = 0;
1145 		EL(ha, "failed, ddi_copyout\n");
1146 	} else {
1147 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1148 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1149 	}
1150 }
1151 
1152 /*
1153  * ql_qry_disc_tgt
1154  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1155  *
1156  * Input:
1157  *	ha:		adapter state pointer.
1158  *	cmd:		EXT_IOCTL cmd struct pointer.
1159  *	mode:		flags.
1160  *
1161  *	cmd->Instance = Port instance in fcport chain.
1162  *
1163  * Returns:
1164  *	None, request status indicated in cmd->Status.
1165  *
1166  * Context:
1167  *	Kernel context.
1168  */
1169 static void
1170 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1171 {
1172 	EXT_DISC_TARGET	tmp_tgt = {0};
1173 	ql_link_t	*link;
1174 	ql_tgt_t	*tq;
1175 	uint16_t	index;
1176 	uint16_t	inst = 0;
1177 
1178 	QL_PRINT_9(CE_CONT, "(%d): entered, target=%d\n", ha->instance,
1179 	    cmd->Instance);
1180 
1181 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1182 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1183 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1184 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1185 		    cmd->ResponseLen);
1186 		cmd->ResponseLen = 0;
1187 		return;
1188 	}
1189 
1190 	/* Scan port list for requested target and fill in the values */
1191 	for (link = NULL, index = 0;
1192 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1193 		for (link = ha->dev[index].first; link != NULL;
1194 		    link = link->next) {
1195 			tq = link->base_address;
1196 
1197 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1198 			    tq->flags & TQF_INITIATOR_DEVICE) {
1199 				continue;
1200 			}
1201 			if (inst != cmd->Instance) {
1202 				inst++;
1203 				continue;
1204 			}
1205 
1206 			/* fill in the values */
1207 			bcopy(tq->node_name, tmp_tgt.WWNN,
1208 			    EXT_DEF_WWN_NAME_SIZE);
1209 			bcopy(tq->port_name, tmp_tgt.WWPN,
1210 			    EXT_DEF_WWN_NAME_SIZE);
1211 
1212 			break;
1213 		}
1214 	}
1215 
1216 	if (link == NULL) {
1217 		/* no matching device */
1218 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1219 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1220 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1221 		cmd->ResponseLen = 0;
1222 		return;
1223 	}
1224 	tmp_tgt.Id[0] = 0;
1225 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1226 	tmp_tgt.Id[2] = tq->d_id.b.area;
1227 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1228 
1229 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1230 
1231 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1232 		(void) ql_inq_scan(ha, tq, 1);
1233 	}
1234 
1235 	tmp_tgt.Type = 0;
1236 	if (tq->flags & TQF_TAPE_DEVICE) {
1237 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1238 	}
1239 
1240 	if (tq->flags & TQF_FABRIC_DEVICE) {
1241 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1242 	} else {
1243 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1244 	}
1245 
1246 	tmp_tgt.Status = 0;
1247 
1248 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1249 
1250 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1251 
1252 	if (ddi_copyout((void *)&tmp_tgt,
1253 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1254 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1255 		cmd->Status = EXT_STATUS_COPY_ERR;
1256 		cmd->ResponseLen = 0;
1257 		EL(ha, "failed, ddi_copyout\n");
1258 	} else {
1259 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1260 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1261 	}
1262 }
1263 
1264 /*
1265  * ql_qry_fw
1266  *	Performs EXT_SC_QUERY_FW subfunction.
1267  *
1268  * Input:
1269  *	ha:	adapter state pointer.
1270  *	cmd:	EXT_IOCTL cmd struct pointer.
1271  *	mode:	flags.
1272  *
1273  * Returns:
1274  *	None, request status indicated in cmd->Status.
1275  *
1276  * Context:
1277  *	Kernel context.
1278  */
1279 static void
1280 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1281 {
1282 	ql_mbx_data_t	mr;
1283 	EXT_FW		fw_info = {0};
1284 
1285 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1286 
1287 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1288 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1289 		cmd->DetailStatus = sizeof (EXT_FW);
1290 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1291 		    cmd->ResponseLen);
1292 		cmd->ResponseLen = 0;
1293 		return;
1294 	}
1295 
1296 	(void) ql_get_fw_version(ha, &mr);
1297 
1298 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1299 	    mr.mb[2], mr.mb[2]);
1300 
1301 	fw_info.Attrib = mr.mb[6];
1302 
1303 	if (ddi_copyout((void *)&fw_info,
1304 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1305 	    sizeof (EXT_FW), mode) != 0) {
1306 		cmd->Status = EXT_STATUS_COPY_ERR;
1307 		cmd->ResponseLen = 0;
1308 		EL(ha, "failed, ddi_copyout\n");
1309 		return;
1310 	} else {
1311 		cmd->ResponseLen = sizeof (EXT_FW);
1312 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1313 	}
1314 }
1315 
1316 /*
1317  * ql_qry_chip
1318  *	Performs EXT_SC_QUERY_CHIP subfunction.
1319  *
1320  * Input:
1321  *	ha:	adapter state pointer.
1322  *	cmd:	EXT_IOCTL cmd struct pointer.
1323  *	mode:	flags.
1324  *
1325  * Returns:
1326  *	None, request status indicated in cmd->Status.
1327  *
1328  * Context:
1329  *	Kernel context.
1330  */
1331 static void
1332 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1333 {
1334 	EXT_CHIP	chip = {0};
1335 
1336 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1337 
1338 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1339 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1340 		cmd->DetailStatus = sizeof (EXT_CHIP);
1341 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1342 		    cmd->ResponseLen);
1343 		cmd->ResponseLen = 0;
1344 		return;
1345 	}
1346 
1347 	chip.VendorId = ha->ven_id;
1348 	chip.DeviceId = ha->device_id;
1349 	chip.SubVendorId = ha->subven_id;
1350 	chip.SubSystemId = ha->subsys_id;
1351 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1352 	chip.IoAddrLen = 0x100;
1353 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1354 	chip.MemAddrLen = 0x100;
1355 	chip.ChipRevID = ha->rev_id;
1356 
1357 	if (ddi_copyout((void *)&chip,
1358 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1359 	    sizeof (EXT_CHIP), mode) != 0) {
1360 		cmd->Status = EXT_STATUS_COPY_ERR;
1361 		cmd->ResponseLen = 0;
1362 		EL(ha, "failed, ddi_copyout\n");
1363 	} else {
1364 		cmd->ResponseLen = sizeof (EXT_CHIP);
1365 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1366 	}
1367 }
1368 
1369 /*
1370  * ql_qry_driver
1371  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1372  *
1373  * Input:
1374  *	ha:	adapter state pointer.
1375  *	cmd:	EXT_IOCTL cmd struct pointer.
1376  *	mode:	flags.
1377  *
1378  * Returns:
1379  *	None, request status indicated in cmd->Status.
1380  *
1381  * Context:
1382  *	Kernel context.
1383  */
1384 static void
1385 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1386 {
1387 	EXT_DRIVER	qd = {0};
1388 
1389 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1390 
1391 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1392 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1393 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1394 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1395 		    cmd->ResponseLen);
1396 		cmd->ResponseLen = 0;
1397 		return;
1398 	}
1399 
1400 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1401 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1402 	qd.TargetsPerBus = (uint16_t)
1403 	    (CFG_IST(ha, (CFG_CTRL_2425|CFG_EXT_FW_INTERFACE)) ?
1404 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1405 	qd.LunsPerTarget = 2030;
1406 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1407 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1408 
1409 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1410 	    sizeof (EXT_DRIVER), mode) != 0) {
1411 		cmd->Status = EXT_STATUS_COPY_ERR;
1412 		cmd->ResponseLen = 0;
1413 		EL(ha, "failed, ddi_copyout\n");
1414 	} else {
1415 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1416 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1417 	}
1418 }
1419 
1420 /*
1421  * ql_fcct
1422  *	IOCTL management server FC-CT passthrough.
1423  *
1424  * Input:
1425  *	ha:	adapter state pointer.
1426  *	cmd:	User space CT arguments pointer.
1427  *	mode:	flags.
1428  *
1429  * Returns:
1430  *	None, request status indicated in cmd->Status.
1431  *
1432  * Context:
1433  *	Kernel context.
1434  */
1435 static void
1436 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1437 {
1438 	ql_mbx_iocb_t		*pkt;
1439 	ql_mbx_data_t		mr;
1440 	dma_mem_t		*dma_mem;
1441 	caddr_t			pld;
1442 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1443 	int			rval;
1444 	ql_ct_iu_preamble_t	*ct;
1445 	ql_xioctl_t		*xp = ha->xioctl;
1446 	ql_tgt_t		tq;
1447 	uint16_t		comp_status, loop_id;
1448 
1449 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1450 
1451 	/* Get CT argument structure. */
1452 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1453 		EL(ha, "failed, No switch\n");
1454 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1455 		cmd->ResponseLen = 0;
1456 		return;
1457 	}
1458 
1459 	if (DRIVER_SUSPENDED(ha)) {
1460 		EL(ha, "failed, LOOP_NOT_READY\n");
1461 		cmd->Status = EXT_STATUS_BUSY;
1462 		cmd->ResponseLen = 0;
1463 		return;
1464 	}
1465 
1466 	/* Login management server device. */
1467 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1468 		tq.d_id.b.al_pa = 0xfa;
1469 		tq.d_id.b.area = 0xff;
1470 		tq.d_id.b.domain = 0xff;
1471 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
1472 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1473 		    MANAGEMENT_SERVER_LOOP_ID);
1474 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1475 		if (rval != QL_SUCCESS) {
1476 			EL(ha, "failed, server login\n");
1477 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1478 			cmd->ResponseLen = 0;
1479 			return;
1480 		} else {
1481 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1482 		}
1483 	}
1484 
1485 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1486 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1487 
1488 	/* Allocate a DMA Memory Descriptor */
1489 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1490 	if (dma_mem == NULL) {
1491 		EL(ha, "failed, kmem_zalloc\n");
1492 		cmd->Status = EXT_STATUS_NO_MEMORY;
1493 		cmd->ResponseLen = 0;
1494 		return;
1495 	}
1496 	/* Determine maximum buffer size. */
1497 	if (cmd->RequestLen < cmd->ResponseLen) {
1498 		pld_byte_cnt = cmd->ResponseLen;
1499 	} else {
1500 		pld_byte_cnt = cmd->RequestLen;
1501 	}
1502 
1503 	/* Allocate command block. */
1504 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1505 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1506 	if (pkt == NULL) {
1507 		EL(ha, "failed, kmem_zalloc\n");
1508 		cmd->Status = EXT_STATUS_NO_MEMORY;
1509 		cmd->ResponseLen = 0;
1510 		return;
1511 	}
1512 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1513 
1514 	/* Get command payload data. */
1515 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1516 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1517 		EL(ha, "failed, get_buffer_data\n");
1518 		kmem_free(pkt, pkt_size);
1519 		cmd->Status = EXT_STATUS_COPY_ERR;
1520 		cmd->ResponseLen = 0;
1521 		return;
1522 	}
1523 
1524 	/* Get DMA memory for the IOCB */
1525 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1526 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1527 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1528 		    "alloc failed", QL_NAME, ha->instance);
1529 		kmem_free(pkt, pkt_size);
1530 		kmem_free(dma_mem, sizeof (dma_mem_t));
1531 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1532 		cmd->ResponseLen = 0;
1533 		return;
1534 	}
1535 
1536 	/* Copy out going payload data to IOCB DMA buffer. */
1537 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1538 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1539 
1540 	/* Sync IOCB DMA buffer. */
1541 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1542 	    DDI_DMA_SYNC_FORDEV);
1543 
1544 	/*
1545 	 * Setup IOCB
1546 	 */
1547 	ct = (ql_ct_iu_preamble_t *)pld;
1548 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1549 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1550 		pkt->ms24.entry_count = 1;
1551 
1552 		/* Set loop ID */
1553 		pkt->ms24.n_port_hdl = (uint16_t)
1554 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1555 		    LE_16(SNS_24XX_HDL) :
1556 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1557 
1558 		/* Set ISP command timeout. */
1559 		pkt->ms24.timeout = LE_16(120);
1560 
1561 		/* Set cmd/response data segment counts. */
1562 		pkt->ms24.cmd_dseg_count = LE_16(1);
1563 		pkt->ms24.resp_dseg_count = LE_16(1);
1564 
1565 		/* Load ct cmd byte count. */
1566 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1567 
1568 		/* Load ct rsp byte count. */
1569 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1570 
1571 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1572 
1573 		/* Load MS command entry data segments. */
1574 		*long_ptr++ = (uint32_t)
1575 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1576 		*long_ptr++ = (uint32_t)
1577 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1578 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1579 
1580 		/* Load MS response entry data segments. */
1581 		*long_ptr++ = (uint32_t)
1582 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1583 		*long_ptr++ = (uint32_t)
1584 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1585 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1586 
1587 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1588 		    sizeof (ql_mbx_iocb_t));
1589 
1590 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1591 		if (comp_status == CS_DATA_UNDERRUN) {
1592 			if ((BE_16(ct->max_residual_size)) == 0) {
1593 				comp_status = CS_COMPLETE;
1594 			}
1595 		}
1596 
1597 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1598 		    0) {
1599 			EL(ha, "failed, I/O timeout or "
1600 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1601 			    pkt->sts24.entry_status,
1602 			    pkt->sts24.scsi_status_l, rval);
1603 			kmem_free(pkt, pkt_size);
1604 			ql_free_dma_resource(ha, dma_mem);
1605 			kmem_free(dma_mem, sizeof (dma_mem_t));
1606 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1607 			cmd->ResponseLen = 0;
1608 			return;
1609 		}
1610 	} else {
1611 		pkt->ms.entry_type = MS_TYPE;
1612 		pkt->ms.entry_count = 1;
1613 
1614 		/* Set loop ID */
1615 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1616 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1617 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1618 			pkt->ms.loop_id_l = LSB(loop_id);
1619 			pkt->ms.loop_id_h = MSB(loop_id);
1620 		} else {
1621 			pkt->ms.loop_id_h = LSB(loop_id);
1622 		}
1623 
1624 		/* Set ISP command timeout. */
1625 		pkt->ms.timeout = LE_16(120);
1626 
1627 		/* Set data segment counts. */
1628 		pkt->ms.cmd_dseg_count_l = 1;
1629 		pkt->ms.total_dseg_count = LE_16(2);
1630 
1631 		/* Response total byte count. */
1632 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1633 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1634 
1635 		/* Command total byte count. */
1636 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1637 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1638 
1639 		/* Load command/response data segments. */
1640 		pkt->ms.dseg_0_address[0] = (uint32_t)
1641 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1642 		pkt->ms.dseg_0_address[1] = (uint32_t)
1643 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1644 		pkt->ms.dseg_1_address[0] = (uint32_t)
1645 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1646 		pkt->ms.dseg_1_address[1] = (uint32_t)
1647 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1648 
1649 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1650 		    sizeof (ql_mbx_iocb_t));
1651 
1652 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1653 		if (comp_status == CS_DATA_UNDERRUN) {
1654 			if ((BE_16(ct->max_residual_size)) == 0) {
1655 				comp_status = CS_COMPLETE;
1656 			}
1657 		}
1658 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1659 			EL(ha, "failed, I/O timeout or "
1660 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1661 			kmem_free(pkt, pkt_size);
1662 			ql_free_dma_resource(ha, dma_mem);
1663 			kmem_free(dma_mem, sizeof (dma_mem_t));
1664 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1665 			cmd->ResponseLen = 0;
1666 			return;
1667 		}
1668 	}
1669 
1670 	/* Sync in coming DMA buffer. */
1671 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1672 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1673 	/* Copy in coming DMA data. */
1674 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1675 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1676 	    DDI_DEV_AUTOINCR);
1677 
1678 	/* Copy response payload from DMA buffer to application. */
1679 	if (cmd->ResponseLen != 0) {
1680 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1681 		    cmd->ResponseLen);
1682 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1683 
1684 		/* Send response payload. */
1685 		if (ql_send_buffer_data(pld,
1686 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1687 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1688 			EL(ha, "failed, send_buffer_data\n");
1689 			cmd->Status = EXT_STATUS_COPY_ERR;
1690 			cmd->ResponseLen = 0;
1691 		}
1692 	}
1693 
1694 	kmem_free(pkt, pkt_size);
1695 	ql_free_dma_resource(ha, dma_mem);
1696 	kmem_free(dma_mem, sizeof (dma_mem_t));
1697 
1698 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1699 }
1700 
1701 /*
1702  * ql_aen_reg
1703  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1704  *
1705  * Input:
1706  *	ha:	adapter state pointer.
1707  *	cmd:	EXT_IOCTL cmd struct pointer.
1708  *	mode:	flags.
1709  *
1710  * Returns:
1711  *	None, request status indicated in cmd->Status.
1712  *
1713  * Context:
1714  *	Kernel context.
1715  */
1716 static void
1717 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1718 {
1719 	EXT_REG_AEN	reg_struct;
1720 	int		rval = 0;
1721 	ql_xioctl_t	*xp = ha->xioctl;
1722 
1723 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1724 
1725 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1726 	    cmd->RequestLen, mode);
1727 
1728 	if (rval == 0) {
1729 		if (reg_struct.Enable) {
1730 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1731 		} else {
1732 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1733 			/* Empty the queue. */
1734 			INTR_LOCK(ha);
1735 			xp->aen_q_head = 0;
1736 			xp->aen_q_tail = 0;
1737 			INTR_UNLOCK(ha);
1738 		}
1739 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1740 	} else {
1741 		cmd->Status = EXT_STATUS_COPY_ERR;
1742 		EL(ha, "failed, ddi_copyin\n");
1743 	}
1744 }
1745 
1746 /*
1747  * ql_aen_get
1748  *	IOCTL management server Asynchronous Event Record Transfer.
1749  *
1750  * Input:
1751  *	ha:	adapter state pointer.
1752  *	cmd:	EXT_IOCTL cmd struct pointer.
1753  *	mode:	flags.
1754  *
1755  * Returns:
1756  *	None, request status indicated in cmd->Status.
1757  *
1758  * Context:
1759  *	Kernel context.
1760  */
1761 static void
1762 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1763 {
1764 	uint32_t	out_size;
1765 	EXT_ASYNC_EVENT	*tmp_q;
1766 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1767 	uint8_t		i;
1768 	uint8_t		queue_cnt;
1769 	uint8_t		request_cnt;
1770 	ql_xioctl_t	*xp = ha->xioctl;
1771 
1772 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1773 
1774 	/* Compute the number of events that can be returned */
1775 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1776 
1777 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1778 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1779 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1780 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1781 		    "Len=%xh\n", request_cnt);
1782 		cmd->ResponseLen = 0;
1783 		return;
1784 	}
1785 
1786 	/* 1st: Make a local copy of the entire queue content. */
1787 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1788 	queue_cnt = 0;
1789 
1790 	INTR_LOCK(ha);
1791 	i = xp->aen_q_head;
1792 
1793 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1794 		if (tmp_q[i].AsyncEventCode != 0) {
1795 			bcopy(&tmp_q[i], &aen[queue_cnt],
1796 			    sizeof (EXT_ASYNC_EVENT));
1797 			queue_cnt++;
1798 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1799 		}
1800 		if (i == xp->aen_q_tail) {
1801 			/* done. */
1802 			break;
1803 		}
1804 		i++;
1805 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1806 			i = 0;
1807 		}
1808 	}
1809 
1810 	/* Empty the queue. */
1811 	xp->aen_q_head = 0;
1812 	xp->aen_q_tail = 0;
1813 
1814 	INTR_UNLOCK(ha);
1815 
1816 	/* 2nd: Now transfer the queue content to user buffer */
1817 	/* Copy the entire queue to user's buffer. */
1818 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1819 	if (queue_cnt == 0) {
1820 		cmd->ResponseLen = 0;
1821 	} else if (ddi_copyout((void *)&aen[0],
1822 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1823 	    out_size, mode) != 0) {
1824 		cmd->Status = EXT_STATUS_COPY_ERR;
1825 		cmd->ResponseLen = 0;
1826 		EL(ha, "failed, ddi_copyout\n");
1827 	} else {
1828 		cmd->ResponseLen = out_size;
1829 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1830 	}
1831 }
1832 
1833 /*
1834  * ql_enqueue_aen
1835  *
1836  * Input:
1837  *	ha:		adapter state pointer.
1838  *	event_code:	async event code of the event to add to queue.
1839  *	payload:	event payload for the queue.
1840  *	INTR_LOCK must be already obtained.
1841  *
1842  * Context:
1843  *	Interrupt or Kernel context, no mailbox commands allowed.
1844  */
1845 void
1846 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1847 {
1848 	uint8_t			new_entry;	/* index to current entry */
1849 	uint16_t		*mbx;
1850 	EXT_ASYNC_EVENT		*aen_queue;
1851 	ql_xioctl_t		*xp = ha->xioctl;
1852 
1853 	QL_PRINT_9(CE_CONT, "(%d): entered, event_code=%d\n", ha->instance,
1854 	    event_code);
1855 
1856 	if (xp == NULL) {
1857 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1858 		return;
1859 	}
1860 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1861 
1862 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1863 		/* Need to change queue pointers to make room. */
1864 
1865 		/* Increment tail for adding new entry. */
1866 		xp->aen_q_tail++;
1867 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1868 			xp->aen_q_tail = 0;
1869 		}
1870 		if (xp->aen_q_head == xp->aen_q_tail) {
1871 			/*
1872 			 * We're overwriting the oldest entry, so need to
1873 			 * update the head pointer.
1874 			 */
1875 			xp->aen_q_head++;
1876 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1877 				xp->aen_q_head = 0;
1878 			}
1879 		}
1880 	}
1881 
1882 	new_entry = xp->aen_q_tail;
1883 	aen_queue[new_entry].AsyncEventCode = event_code;
1884 
1885 	/* Update payload */
1886 	if (payload != NULL) {
1887 		switch (event_code) {
1888 		case MBA_LIP_OCCURRED:
1889 		case MBA_LOOP_UP:
1890 		case MBA_LOOP_DOWN:
1891 		case MBA_LIP_F8:
1892 		case MBA_LIP_RESET:
1893 		case MBA_PORT_UPDATE:
1894 			break;
1895 		case MBA_RSCN_UPDATE:
1896 			mbx = (uint16_t *)payload;
1897 			/* al_pa */
1898 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1899 			    LSB(mbx[2]);
1900 			/* area */
1901 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1902 			    MSB(mbx[2]);
1903 			/* domain */
1904 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1905 			    LSB(mbx[1]);
1906 			/* save in big endian */
1907 			BIG_ENDIAN_24(&aen_queue[new_entry].
1908 			    Payload.RSCN.RSCNInfo[0]);
1909 
1910 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1911 			    MSB(mbx[1]);
1912 
1913 			break;
1914 		default:
1915 			/* Not supported */
1916 			EL(ha, "failed, event code not supported=%xh\n",
1917 			    event_code);
1918 			aen_queue[new_entry].AsyncEventCode = 0;
1919 			break;
1920 		}
1921 	}
1922 
1923 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1924 }
1925 
1926 /*
1927  * ql_scsi_passthru
1928  *	IOCTL SCSI passthrough.
1929  *
1930  * Input:
1931  *	ha:	adapter state pointer.
1932  *	cmd:	User space SCSI command pointer.
1933  *	mode:	flags.
1934  *
1935  * Returns:
1936  *	None, request status indicated in cmd->Status.
1937  *
1938  * Context:
1939  *	Kernel context.
1940  */
1941 static void
1942 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1943 {
1944 	ql_mbx_iocb_t		*pkt;
1945 	ql_mbx_data_t		mr;
1946 	dma_mem_t		*dma_mem;
1947 	caddr_t			pld;
1948 	uint32_t		pkt_size, pld_size;
1949 	uint16_t		qlnt, retries, cnt, cnt2;
1950 	uint8_t			*name;
1951 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1952 	EXT_SCSI_PASSTHRU	*usp_req;
1953 	int			rval;
1954 	union _passthru {
1955 		EXT_SCSI_PASSTHRU	sp_cmd;
1956 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1957 	} pt_req;		/* Passthru request */
1958 	uint32_t		status, sense_sz = 0;
1959 	ql_tgt_t		*tq = NULL;
1960 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1961 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1962 
1963 	/* SCSI request struct for SCSI passthrough IOs. */
1964 	struct {
1965 		uint16_t	lun;
1966 		uint16_t	sense_length;	/* Sense buffer size */
1967 		size_t		resid;		/* Residual */
1968 		uint8_t		*cdbp;		/* Requestor's CDB */
1969 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1970 		uint8_t		cdb_len;	/* Requestor's CDB length */
1971 		uint8_t		direction;
1972 	} scsi_req;
1973 
1974 	struct {
1975 		uint8_t		*rsp_info;
1976 		uint8_t		*req_sense_data;
1977 		uint32_t	residual_length;
1978 		uint32_t	rsp_info_length;
1979 		uint32_t	req_sense_length;
1980 		uint16_t	comp_status;
1981 		uint8_t		state_flags_l;
1982 		uint8_t		state_flags_h;
1983 		uint8_t		scsi_status_l;
1984 		uint8_t		scsi_status_h;
1985 	} sts;
1986 
1987 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1988 
1989 	/* Verify Sub Code and set cnt to needed request size. */
1990 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
1991 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
1992 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
1993 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
1994 	} else {
1995 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
1996 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
1997 		cmd->ResponseLen = 0;
1998 		return;
1999 	}
2000 
2001 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2002 	if (dma_mem == NULL) {
2003 		EL(ha, "failed, kmem_zalloc\n");
2004 		cmd->Status = EXT_STATUS_NO_MEMORY;
2005 		cmd->ResponseLen = 0;
2006 		return;
2007 	}
2008 	/*  Verify the size of and copy in the passthru request structure. */
2009 	if (cmd->RequestLen != pld_size) {
2010 		/* Return error */
2011 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2012 		    cmd->RequestLen, pld_size);
2013 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2014 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2015 		cmd->ResponseLen = 0;
2016 		return;
2017 	}
2018 
2019 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2020 	    pld_size, mode) != 0) {
2021 		EL(ha, "failed, ddi_copyin\n");
2022 		cmd->Status = EXT_STATUS_COPY_ERR;
2023 		cmd->ResponseLen = 0;
2024 		return;
2025 	}
2026 
2027 	/*
2028 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2029 	 * request data structure.
2030 	 */
2031 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2032 		scsi_req.lun = sp_req->TargetAddr.Lun;
2033 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2034 		scsi_req.cdbp = &sp_req->Cdb[0];
2035 		scsi_req.cdb_len = sp_req->CdbLength;
2036 		scsi_req.direction = sp_req->Direction;
2037 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2038 		scsi_req.u_sense = &usp_req->SenseData[0];
2039 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2040 
2041 		qlnt = QLNT_PORT;
2042 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2043 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2044 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2045 		tq = ql_find_port(ha, name, qlnt);
2046 	} else {
2047 		/*
2048 		 * Must be FC PASSTHRU, verified above.
2049 		 */
2050 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2051 			qlnt = QLNT_PORT;
2052 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2053 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2054 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2055 			    ha->instance, cmd->SubCode, name[0], name[1],
2056 			    name[2], name[3], name[4], name[5], name[6],
2057 			    name[7]);
2058 			tq = ql_find_port(ha, name, qlnt);
2059 		} else if (fc_req->FCScsiAddr.DestType ==
2060 		    EXT_DEF_DESTTYPE_WWNN) {
2061 			qlnt = QLNT_NODE;
2062 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2063 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2064 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2065 			    ha->instance, cmd->SubCode, name[0], name[1],
2066 			    name[2], name[3], name[4], name[5], name[6],
2067 			    name[7]);
2068 			tq = ql_find_port(ha, name, qlnt);
2069 		} else if (fc_req->FCScsiAddr.DestType ==
2070 		    EXT_DEF_DESTTYPE_PORTID) {
2071 			qlnt = QLNT_PID;
2072 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2073 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2074 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2075 			    name[0], name[1], name[2]);
2076 			tq = ql_find_port(ha, name, qlnt);
2077 		} else {
2078 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2079 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2080 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2081 			cmd->ResponseLen = 0;
2082 			return;
2083 		}
2084 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2085 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2086 		scsi_req.cdbp = &sp_req->Cdb[0];
2087 		scsi_req.cdb_len = sp_req->CdbLength;
2088 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2089 		scsi_req.u_sense = &ufc_req->SenseData[0];
2090 		scsi_req.direction = fc_req->Direction;
2091 	}
2092 
2093 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2094 		EL(ha, "failed, fc_port not found\n");
2095 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2096 		cmd->ResponseLen = 0;
2097 		return;
2098 	}
2099 
2100 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2101 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2102 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2103 		cmd->ResponseLen = 0;
2104 		return;
2105 	}
2106 
2107 	/* Allocate command block. */
2108 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2109 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2110 	    cmd->ResponseLen) {
2111 		pld_size = cmd->ResponseLen;
2112 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2113 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2114 		if (pkt == NULL) {
2115 			EL(ha, "failed, kmem_zalloc\n");
2116 			cmd->Status = EXT_STATUS_NO_MEMORY;
2117 			cmd->ResponseLen = 0;
2118 			return;
2119 		}
2120 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2121 
2122 		/* Get DMA memory for the IOCB */
2123 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2124 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2125 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2126 			    "alloc failed", QL_NAME, ha->instance);
2127 			kmem_free(pkt, pkt_size);
2128 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2129 			cmd->ResponseLen = 0;
2130 			return;
2131 		}
2132 
2133 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2134 			scsi_req.direction = (uint8_t)
2135 			    (CFG_IST(ha, CFG_CTRL_2425) ?
2136 			    CF_RD : CF_DATA_IN | CF_STAG);
2137 		} else {
2138 			scsi_req.direction = (uint8_t)
2139 			    (CFG_IST(ha, CFG_CTRL_2425) ?
2140 			    CF_WR : CF_DATA_OUT | CF_STAG);
2141 			cmd->ResponseLen = 0;
2142 
2143 			/* Get command payload. */
2144 			if (ql_get_buffer_data(
2145 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2146 			    pld, pld_size, mode) != pld_size) {
2147 				EL(ha, "failed, get_buffer_data\n");
2148 				cmd->Status = EXT_STATUS_COPY_ERR;
2149 
2150 				kmem_free(pkt, pkt_size);
2151 				ql_free_dma_resource(ha, dma_mem);
2152 				kmem_free(dma_mem, sizeof (dma_mem_t));
2153 				return;
2154 			}
2155 
2156 			/* Copy out going data to DMA buffer. */
2157 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2158 			    (uint8_t *)dma_mem->bp, pld_size,
2159 			    DDI_DEV_AUTOINCR);
2160 
2161 			/* Sync DMA buffer. */
2162 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2163 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2164 		}
2165 	} else {
2166 		scsi_req.direction = (uint8_t)
2167 		    (CFG_IST(ha, CFG_CTRL_2425) ? 0 : CF_STAG);
2168 		cmd->ResponseLen = 0;
2169 
2170 		pkt_size = sizeof (ql_mbx_iocb_t);
2171 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2172 		if (pkt == NULL) {
2173 			EL(ha, "failed, kmem_zalloc-2\n");
2174 			cmd->Status = EXT_STATUS_NO_MEMORY;
2175 			return;
2176 		}
2177 		pld = NULL;
2178 		pld_size = 0;
2179 	}
2180 
2181 	/* retries = ha->port_down_retry_count; */
2182 	retries = 1;
2183 	cmd->Status = EXT_STATUS_OK;
2184 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2185 
2186 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2187 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2188 
2189 	do {
2190 		if (DRIVER_SUSPENDED(ha)) {
2191 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2192 			break;
2193 		}
2194 
2195 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2196 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2197 			pkt->cmd24.entry_count = 1;
2198 
2199 			/* Set LUN number */
2200 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2201 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2202 
2203 			/* Set N_port handle */
2204 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2205 
2206 			/* Set VP Index */
2207 			pkt->cmd24.vp_index = ha->vp_index;
2208 
2209 			/* Set target ID */
2210 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2211 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2212 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2213 
2214 			/* Set ISP command timeout. */
2215 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2216 
2217 			/* Load SCSI CDB */
2218 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2219 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2220 			    DDI_DEV_AUTOINCR);
2221 			for (cnt = 0; cnt < MAX_CMDSZ;
2222 			    cnt = (uint16_t)(cnt + 4)) {
2223 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2224 				    + cnt, 4);
2225 			}
2226 
2227 			/* Set tag queue control flags */
2228 			pkt->cmd24.task = TA_STAG;
2229 
2230 			if (pld_size) {
2231 				/* Set transfer direction. */
2232 				pkt->cmd24.control_flags = scsi_req.direction;
2233 
2234 				/* Set data segment count. */
2235 				pkt->cmd24.dseg_count = LE_16(1);
2236 
2237 				/* Load total byte count. */
2238 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2239 
2240 				/* Load data descriptor. */
2241 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2242 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2243 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2244 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2245 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2246 			}
2247 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2248 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2249 			pkt->cmd3.entry_count = 1;
2250 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2251 				pkt->cmd3.target_l = LSB(tq->loop_id);
2252 				pkt->cmd3.target_h = MSB(tq->loop_id);
2253 			} else {
2254 				pkt->cmd3.target_h = LSB(tq->loop_id);
2255 			}
2256 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2257 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2258 			pkt->cmd3.control_flags_l = scsi_req.direction;
2259 			pkt->cmd3.timeout = LE_16(15);
2260 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2261 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2262 			}
2263 			if (pld_size) {
2264 				pkt->cmd3.dseg_count = LE_16(1);
2265 				pkt->cmd3.byte_count = LE_32(pld_size);
2266 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2267 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2268 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2269 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2270 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2271 			}
2272 		} else {
2273 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2274 			pkt->cmd.entry_count = 1;
2275 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2276 				pkt->cmd.target_l = LSB(tq->loop_id);
2277 				pkt->cmd.target_h = MSB(tq->loop_id);
2278 			} else {
2279 				pkt->cmd.target_h = LSB(tq->loop_id);
2280 			}
2281 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2282 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2283 			pkt->cmd.control_flags_l = scsi_req.direction;
2284 			pkt->cmd.timeout = LE_16(15);
2285 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2286 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2287 			}
2288 			if (pld_size) {
2289 				pkt->cmd.dseg_count = LE_16(1);
2290 				pkt->cmd.byte_count = LE_32(pld_size);
2291 				pkt->cmd.dseg_0_address = (uint32_t)
2292 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2293 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2294 			}
2295 		}
2296 		/* Go issue command and wait for completion. */
2297 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2298 		QL_DUMP_9(pkt, 8, pkt_size);
2299 
2300 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2301 
2302 		if (pld_size) {
2303 			/* Sync in coming DMA buffer. */
2304 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2305 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2306 			/* Copy in coming DMA data. */
2307 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2308 			    (uint8_t *)dma_mem->bp, pld_size,
2309 			    DDI_DEV_AUTOINCR);
2310 		}
2311 
2312 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2313 			pkt->sts24.entry_status = (uint8_t)
2314 			    (pkt->sts24.entry_status & 0x3c);
2315 		} else {
2316 			pkt->sts.entry_status = (uint8_t)
2317 			    (pkt->sts.entry_status & 0x7e);
2318 		}
2319 
2320 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2321 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2322 			    pkt->sts.entry_status, tq->d_id.b24);
2323 			status = QL_FUNCTION_PARAMETER_ERROR;
2324 		}
2325 
2326 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2327 		    LE_16(pkt->sts24.comp_status) :
2328 		    LE_16(pkt->sts.comp_status));
2329 
2330 		/*
2331 		 * We have verified about all the request that can be so far.
2332 		 * Now we need to start verification of our ability to
2333 		 * actually issue the CDB.
2334 		 */
2335 		if (DRIVER_SUSPENDED(ha)) {
2336 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2337 			break;
2338 		} else if (status == QL_SUCCESS &&
2339 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2340 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2341 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2342 			if (tq->flags & TQF_FABRIC_DEVICE) {
2343 				rval = ql_login_fport(ha, tq, tq->loop_id,
2344 				    LFF_NO_PLOGI, &mr);
2345 				if (rval != QL_SUCCESS) {
2346 					EL(ha, "failed, login_fport=%xh, "
2347 					    "d_id=%xh\n", rval, tq->d_id.b24);
2348 				}
2349 			} else {
2350 				rval = ql_login_lport(ha, tq, tq->loop_id,
2351 				    LLF_NONE);
2352 				if (rval != QL_SUCCESS) {
2353 					EL(ha, "failed, login_lport=%xh, "
2354 					    "d_id=%xh\n", rval, tq->d_id.b24);
2355 				}
2356 			}
2357 		} else {
2358 			break;
2359 		}
2360 
2361 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2362 
2363 	} while (retries--);
2364 
2365 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2366 		/* Cannot issue command now, maybe later */
2367 		EL(ha, "failed, suspended\n");
2368 		kmem_free(pkt, pkt_size);
2369 		ql_free_dma_resource(ha, dma_mem);
2370 		kmem_free(dma_mem, sizeof (dma_mem_t));
2371 		cmd->Status = EXT_STATUS_SUSPENDED;
2372 		cmd->ResponseLen = 0;
2373 		return;
2374 	}
2375 
2376 	if (status != QL_SUCCESS) {
2377 		/* Command error */
2378 		EL(ha, "failed, I/O\n");
2379 		kmem_free(pkt, pkt_size);
2380 		ql_free_dma_resource(ha, dma_mem);
2381 		kmem_free(dma_mem, sizeof (dma_mem_t));
2382 		cmd->Status = EXT_STATUS_ERR;
2383 		cmd->DetailStatus = status;
2384 		cmd->ResponseLen = 0;
2385 		return;
2386 	}
2387 
2388 	/* Setup status. */
2389 	if (CFG_IST(ha, CFG_CTRL_2425)) {
2390 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2391 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2392 
2393 		/* Setup residuals. */
2394 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2395 
2396 		/* Setup state flags. */
2397 		sts.state_flags_l = pkt->sts24.state_flags_l;
2398 		sts.state_flags_h = pkt->sts24.state_flags_h;
2399 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2400 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2401 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2402 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2403 		} else {
2404 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2405 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2406 			    SF_GOT_STATUS);
2407 		}
2408 		if (scsi_req.direction & CF_WR) {
2409 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2410 			    SF_DATA_OUT);
2411 		} else if (scsi_req.direction & CF_RD) {
2412 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2413 			    SF_DATA_IN);
2414 		}
2415 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2416 
2417 		/* Setup FCP response info. */
2418 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2419 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2420 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2421 		for (cnt = 0; cnt < sts.rsp_info_length;
2422 		    cnt = (uint16_t)(cnt + 4)) {
2423 			ql_chg_endian(sts.rsp_info + cnt, 4);
2424 		}
2425 
2426 		/* Setup sense data. */
2427 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2428 			sts.req_sense_length =
2429 			    LE_32(pkt->sts24.fcp_sense_length);
2430 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2431 			    SF_ARQ_DONE);
2432 		} else {
2433 			sts.req_sense_length = 0;
2434 		}
2435 		sts.req_sense_data =
2436 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2437 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2438 		    (uintptr_t)sts.req_sense_data);
2439 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2440 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2441 		}
2442 	} else {
2443 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2444 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2445 
2446 		/* Setup residuals. */
2447 		sts.residual_length = LE_32(pkt->sts.residual_length);
2448 
2449 		/* Setup state flags. */
2450 		sts.state_flags_l = pkt->sts.state_flags_l;
2451 		sts.state_flags_h = pkt->sts.state_flags_h;
2452 
2453 		/* Setup FCP response info. */
2454 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2455 		    LE_16(pkt->sts.rsp_info_length) : 0;
2456 		sts.rsp_info = &pkt->sts.rsp_info[0];
2457 
2458 		/* Setup sense data. */
2459 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2460 		    LE_16(pkt->sts.req_sense_length) : 0;
2461 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2462 	}
2463 
2464 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2465 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2466 
2467 	switch (sts.comp_status) {
2468 	case CS_INCOMPLETE:
2469 	case CS_ABORTED:
2470 	case CS_DEVICE_UNAVAILABLE:
2471 	case CS_PORT_UNAVAILABLE:
2472 	case CS_PORT_LOGGED_OUT:
2473 	case CS_PORT_CONFIG_CHG:
2474 	case CS_PORT_BUSY:
2475 	case CS_LOOP_DOWN_ABORT:
2476 		cmd->Status = EXT_STATUS_BUSY;
2477 		break;
2478 	case CS_RESET:
2479 	case CS_QUEUE_FULL:
2480 		cmd->Status = EXT_STATUS_ERR;
2481 		break;
2482 	case CS_TIMEOUT:
2483 		cmd->Status = EXT_STATUS_ERR;
2484 		break;
2485 	case CS_DATA_OVERRUN:
2486 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2487 		break;
2488 	case CS_DATA_UNDERRUN:
2489 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2490 		break;
2491 	}
2492 
2493 	/*
2494 	 * If non data transfer commands fix tranfer counts.
2495 	 */
2496 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2497 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2498 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2499 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2500 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2501 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2502 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2503 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2504 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2505 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2506 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2507 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2508 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2509 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2510 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2511 		/*
2512 		 * Non data transfer command, clear sts_entry residual
2513 		 * length.
2514 		 */
2515 		sts.residual_length = 0;
2516 		cmd->ResponseLen = 0;
2517 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2518 			sts.comp_status = CS_COMPLETE;
2519 			cmd->Status = EXT_STATUS_OK;
2520 		}
2521 	} else {
2522 		cmd->ResponseLen = pld_size;
2523 	}
2524 
2525 	/* Correct ISP completion status */
2526 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2527 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2528 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2529 		    ha->instance);
2530 		scsi_req.resid = 0;
2531 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2532 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2533 		    ha->instance);
2534 		scsi_req.resid = sts.residual_length;
2535 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2536 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2537 
2538 			cmd->ResponseLen = (uint32_t)
2539 			    (pld_size - scsi_req.resid);
2540 		} else {
2541 			EL(ha, "failed, Transfer ERROR\n");
2542 			cmd->Status = EXT_STATUS_ERR;
2543 			cmd->ResponseLen = 0;
2544 		}
2545 	} else {
2546 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2547 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2548 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2549 		    sts.scsi_status_l);
2550 
2551 		scsi_req.resid = pld_size;
2552 		/*
2553 		 * Handle residual count on SCSI check
2554 		 * condition.
2555 		 *
2556 		 * - If Residual Under / Over is set, use the
2557 		 *   Residual Transfer Length field in IOCB.
2558 		 * - If Residual Under / Over is not set, and
2559 		 *   Transferred Data bit is set in State Flags
2560 		 *   field of IOCB, report residual value of 0
2561 		 *   (you may want to do this for tape
2562 		 *   Write-type commands only). This takes care
2563 		 *   of logical end of tape problem and does
2564 		 *   not break Unit Attention.
2565 		 * - If Residual Under / Over is not set, and
2566 		 *   Transferred Data bit is not set in State
2567 		 *   Flags, report residual value equal to
2568 		 *   original data transfer length.
2569 		 */
2570 		if (sts.scsi_status_l & STATUS_CHECK) {
2571 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2572 			cmd->DetailStatus = sts.scsi_status_l;
2573 			if (sts.scsi_status_h &
2574 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2575 				scsi_req.resid = sts.residual_length;
2576 			} else if (sts.state_flags_h &
2577 			    STATE_XFERRED_DATA) {
2578 				scsi_req.resid = 0;
2579 			}
2580 		}
2581 	}
2582 
2583 	if (sts.scsi_status_l & STATUS_CHECK &&
2584 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2585 	    sts.req_sense_length) {
2586 		/*
2587 		 * Check condition with vaild sense data flag set and sense
2588 		 * length != 0
2589 		 */
2590 		if (sts.req_sense_length > scsi_req.sense_length) {
2591 			sense_sz = scsi_req.sense_length;
2592 		} else {
2593 			sense_sz = sts.req_sense_length;
2594 		}
2595 
2596 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2597 		    tq->d_id.b24);
2598 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2599 
2600 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2601 		    (size_t)sense_sz, mode) != 0) {
2602 			EL(ha, "failed, request sense ddi_copyout\n");
2603 		}
2604 
2605 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2606 		cmd->DetailStatus = sts.scsi_status_l;
2607 	}
2608 
2609 	/* Copy response payload from DMA buffer to application. */
2610 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2611 	    cmd->ResponseLen != 0) {
2612 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2613 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2614 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2615 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2616 
2617 		/* Send response payload. */
2618 		if (ql_send_buffer_data(pld,
2619 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2620 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2621 			EL(ha, "failed, send_buffer_data\n");
2622 			cmd->Status = EXT_STATUS_COPY_ERR;
2623 			cmd->ResponseLen = 0;
2624 		}
2625 	}
2626 
2627 	if (cmd->Status != EXT_STATUS_OK) {
2628 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2629 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2630 	} else {
2631 		/*EMPTY*/
2632 		QL_PRINT_9(CE_CONT, "(%d): exiting, ResponseLen=%d\n",
2633 		    ha->instance, cmd->ResponseLen);
2634 	}
2635 
2636 	kmem_free(pkt, pkt_size);
2637 	ql_free_dma_resource(ha, dma_mem);
2638 	kmem_free(dma_mem, sizeof (dma_mem_t));
2639 }
2640 
2641 /*
2642  * ql_wwpn_to_scsiaddr
2643  *
2644  * Input:
2645  *	ha:	adapter state pointer.
2646  *	cmd:	EXT_IOCTL cmd struct pointer.
2647  *	mode:	flags.
2648  *
2649  * Context:
2650  *	Kernel context.
2651  */
2652 static void
2653 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2654 {
2655 	int		status;
2656 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2657 	EXT_SCSI_ADDR	*tmp_addr;
2658 	ql_tgt_t	*tq;
2659 
2660 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2661 
2662 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2663 		/* Return error */
2664 		EL(ha, "incorrect RequestLen\n");
2665 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2666 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2667 		return;
2668 	}
2669 
2670 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2671 	    cmd->RequestLen, mode);
2672 
2673 	if (status != 0) {
2674 		cmd->Status = EXT_STATUS_COPY_ERR;
2675 		EL(ha, "failed, ddi_copyin\n");
2676 		return;
2677 	}
2678 
2679 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2680 
2681 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2682 		/* no matching device */
2683 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2684 		EL(ha, "failed, device not found\n");
2685 		return;
2686 	}
2687 
2688 	/* Copy out the IDs found.  For now we can only return target ID. */
2689 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2690 
2691 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2692 
2693 	if (status != 0) {
2694 		cmd->Status = EXT_STATUS_COPY_ERR;
2695 		EL(ha, "failed, ddi_copyout\n");
2696 	} else {
2697 		cmd->Status = EXT_STATUS_OK;
2698 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2699 	}
2700 }
2701 
2702 /*
2703  * ql_host_idx
2704  *	Gets host order index.
2705  *
2706  * Input:
2707  *	ha:	adapter state pointer.
2708  *	cmd:	EXT_IOCTL cmd struct pointer.
2709  *	mode:	flags.
2710  *
2711  * Returns:
2712  *	None, request status indicated in cmd->Status.
2713  *
2714  * Context:
2715  *	Kernel context.
2716  */
2717 static void
2718 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2719 {
2720 	uint16_t	idx;
2721 
2722 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2723 
2724 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2725 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2726 		cmd->DetailStatus = sizeof (uint16_t);
2727 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2728 		cmd->ResponseLen = 0;
2729 		return;
2730 	}
2731 
2732 	idx = (uint16_t)ha->instance;
2733 
2734 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2735 	    sizeof (uint16_t), mode) != 0) {
2736 		cmd->Status = EXT_STATUS_COPY_ERR;
2737 		cmd->ResponseLen = 0;
2738 		EL(ha, "failed, ddi_copyout\n");
2739 	} else {
2740 		cmd->ResponseLen = sizeof (uint16_t);
2741 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2742 	}
2743 }
2744 
2745 /*
2746  * ql_host_drvname
2747  *	Gets host driver name
2748  *
2749  * Input:
2750  *	ha:	adapter state pointer.
2751  *	cmd:	EXT_IOCTL cmd struct pointer.
2752  *	mode:	flags.
2753  *
2754  * Returns:
2755  *	None, request status indicated in cmd->Status.
2756  *
2757  * Context:
2758  *	Kernel context.
2759  */
2760 static void
2761 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2762 {
2763 
2764 	char		drvname[] = QL_NAME;
2765 	uint32_t	qlnamelen;
2766 
2767 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2768 
2769 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2770 
2771 	if (cmd->ResponseLen < qlnamelen) {
2772 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2773 		cmd->DetailStatus = qlnamelen;
2774 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2775 		    cmd->ResponseLen, qlnamelen);
2776 		cmd->ResponseLen = 0;
2777 		return;
2778 	}
2779 
2780 	if (ddi_copyout((void *)&drvname,
2781 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2782 	    qlnamelen, mode) != 0) {
2783 		cmd->Status = EXT_STATUS_COPY_ERR;
2784 		cmd->ResponseLen = 0;
2785 		EL(ha, "failed, ddi_copyout\n");
2786 	} else {
2787 		cmd->ResponseLen = qlnamelen-1;
2788 	}
2789 
2790 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2791 }
2792 
2793 /*
2794  * ql_read_nvram
2795  *	Get NVRAM contents.
2796  *
2797  * Input:
2798  *	ha:	adapter state pointer.
2799  *	cmd:	EXT_IOCTL cmd struct pointer.
2800  *	mode:	flags.
2801  *
2802  * Returns:
2803  *	None, request status indicated in cmd->Status.
2804  *
2805  * Context:
2806  *	Kernel context.
2807  */
2808 static void
2809 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2810 {
2811 	uint32_t	nv_size;
2812 
2813 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2814 
2815 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2816 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2817 	if (cmd->ResponseLen < nv_size) {
2818 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2819 		cmd->DetailStatus = nv_size;
2820 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2821 		    cmd->ResponseLen);
2822 		cmd->ResponseLen = 0;
2823 		return;
2824 	}
2825 
2826 	/* Get NVRAM data. */
2827 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2828 	    mode) != 0) {
2829 		cmd->Status = EXT_STATUS_COPY_ERR;
2830 		cmd->ResponseLen = 0;
2831 		EL(ha, "failed, copy error\n");
2832 	} else {
2833 		cmd->ResponseLen = nv_size;
2834 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2835 	}
2836 }
2837 
2838 /*
2839  * ql_write_nvram
2840  *	Loads NVRAM contents.
2841  *
2842  * Input:
2843  *	ha:	adapter state pointer.
2844  *	cmd:	EXT_IOCTL cmd struct pointer.
2845  *	mode:	flags.
2846  *
2847  * Returns:
2848  *	None, request status indicated in cmd->Status.
2849  *
2850  * Context:
2851  *	Kernel context.
2852  */
2853 static void
2854 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2855 {
2856 	uint32_t	nv_size;
2857 
2858 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2859 
2860 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2861 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2862 	if (cmd->RequestLen < nv_size) {
2863 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2864 		cmd->DetailStatus = sizeof (nvram_t);
2865 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2866 		    cmd->RequestLen);
2867 		return;
2868 	}
2869 
2870 	/* Load NVRAM data. */
2871 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2872 	    mode) != 0) {
2873 		cmd->Status = EXT_STATUS_COPY_ERR;
2874 		EL(ha, "failed, copy error\n");
2875 	} else {
2876 		/*EMPTY*/
2877 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2878 	}
2879 }
2880 
2881 /*
2882  * ql_write_vpd
2883  *	Loads VPD contents.
2884  *
2885  * Input:
2886  *	ha:	adapter state pointer.
2887  *	cmd:	EXT_IOCTL cmd struct pointer.
2888  *	mode:	flags.
2889  *
2890  * Returns:
2891  *	None, request status indicated in cmd->Status.
2892  *
2893  * Context:
2894  *	Kernel context.
2895  */
2896 static void
2897 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2898 {
2899 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2900 
2901 	int32_t		rval = 0;
2902 
2903 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
2904 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2905 		EL(ha, "failed, invalid request for HBA\n");
2906 		return;
2907 	}
2908 
2909 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2910 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2911 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2912 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2913 		    cmd->RequestLen);
2914 		return;
2915 	}
2916 
2917 	/* Load VPD data. */
2918 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2919 	    mode)) != 0) {
2920 		cmd->Status = EXT_STATUS_COPY_ERR;
2921 		cmd->DetailStatus = rval;
2922 		EL(ha, "failed, errno=%x\n", rval);
2923 	} else {
2924 		/*EMPTY*/
2925 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2926 	}
2927 }
2928 
2929 /*
2930  * ql_read_vpd
2931  *	Dumps VPD contents.
2932  *
2933  * Input:
2934  *	ha:	adapter state pointer.
2935  *	cmd:	EXT_IOCTL cmd struct pointer.
2936  *	mode:	flags.
2937  *
2938  * Returns:
2939  *	None, request status indicated in cmd->Status.
2940  *
2941  * Context:
2942  *	Kernel context.
2943  */
2944 static void
2945 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2946 {
2947 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2948 
2949 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
2950 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2951 		EL(ha, "failed, invalid request for HBA\n");
2952 		return;
2953 	}
2954 
2955 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2956 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2957 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2958 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2959 		    cmd->ResponseLen);
2960 		return;
2961 	}
2962 
2963 	/* Dump VPD data. */
2964 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2965 	    mode)) != 0) {
2966 		cmd->Status = EXT_STATUS_COPY_ERR;
2967 		EL(ha, "failed,\n");
2968 	} else {
2969 		/*EMPTY*/
2970 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2971 	}
2972 }
2973 
2974 /*
2975  * ql_get_fcache
2976  *	Dumps flash cache contents.
2977  *
2978  * Input:
2979  *	ha:	adapter state pointer.
2980  *	cmd:	EXT_IOCTL cmd struct pointer.
2981  *	mode:	flags.
2982  *
2983  * Returns:
2984  *	None, request status indicated in cmd->Status.
2985  *
2986  * Context:
2987  *	Kernel context.
2988  */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Hold the cache lock across all list walks and copyouts below. */
	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response buffer the caller must provide:
	 * 100 bytes on pre-24xx adapters, 400 on 24xx/25xx
	 * (three 100-byte image slots plus firmware at offset 300).
	 */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/* boff: user-buffer offset; bsize reused as total bytes copied. */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* Each image slot is capped at 100 bytes. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, exiting\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance by the fixed slot size, not cpsize. */
			boff += 100;
			bsize += cpsize;
			/* Clear this type so each type is returned once. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		/* Bytes to skip: PCI header + PCI data structure. */
		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Firmware slot is also capped at 100 bytes. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, exiting\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): adds 100 here even when cpsize < 100,
		 * unlike the loop above which adds cpsize — presumably
		 * intentional for the fixed legacy layout; confirm.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* DetailStatus reports total bytes placed in the user buffer. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
3088 
3089 /*
3090  * ql_get_fcache_ex
3091  *	Dumps flash cache contents.
3092  *
3093  * Input:
3094  *	ha:	adapter state pointer.
3095  *	cmd:	EXT_IOCTL cmd struct pointer.
3096  *	mode:	flags.
3097  *
3098  * Returns:
3099  *	None, request status indicated in cmd->Status.
3100  *
3101  * Context:
3102  *	Kernel context.
3103  */
3104 static void
3105 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3106 {
3107 	uint32_t	bsize = 0;
3108 	uint32_t	boff = 0;
3109 	ql_fcache_t	*fptr;
3110 
3111 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3112 
3113 	CACHE_LOCK(ha);
3114 	if (ha->fcache == NULL) {
3115 		CACHE_UNLOCK(ha);
3116 		cmd->Status = EXT_STATUS_ERR;
3117 		EL(ha, "failed, adapter fcache not setup\n");
3118 		return;
3119 	}
3120 
3121 	/* Make sure user passed enough buffer space */
3122 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3123 		bsize += FBUFSIZE;
3124 	}
3125 
3126 	if (cmd->ResponseLen < bsize) {
3127 		CACHE_UNLOCK(ha);
3128 		if (cmd->ResponseLen != 0) {
3129 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3130 			    bsize, cmd->ResponseLen);
3131 		}
3132 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3133 		cmd->DetailStatus = bsize;
3134 		return;
3135 	}
3136 
3137 	boff = 0;
3138 	fptr = ha->fcache;
3139 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3140 		/* Get the next image */
3141 		if (ddi_copyout(fptr->buf,
3142 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3143 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3144 		    mode) != 0) {
3145 			CACHE_UNLOCK(ha);
3146 			EL(ha, "failed, ddicopy at %xh, exiting\n", boff);
3147 			cmd->Status = EXT_STATUS_COPY_ERR;
3148 			cmd->DetailStatus = 0;
3149 			return;
3150 		}
3151 		boff += FBUFSIZE;
3152 		fptr = fptr->next;
3153 	}
3154 
3155 	CACHE_UNLOCK(ha);
3156 	cmd->Status = EXT_STATUS_OK;
3157 	cmd->DetailStatus = bsize;
3158 
3159 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3160 }
3161 
3162 
3163 /*
3164  * ql_read_flash
3165  *	Get flash contents.
3166  *
3167  * Input:
3168  *	ha:	adapter state pointer.
3169  *	cmd:	EXT_IOCTL cmd struct pointer.
3170  *	mode:	flags.
3171  *
3172  * Returns:
3173  *	None, request status indicated in cmd->Status.
3174  *
3175  * Context:
3176  *	Kernel context.
3177  */
3178 static void
3179 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3180 {
3181 	ql_xioctl_t	*xp = ha->xioctl;
3182 
3183 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3184 
3185 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3186 		EL(ha, "ql_stall_driver failed\n");
3187 		cmd->Status = EXT_STATUS_BUSY;
3188 		cmd->DetailStatus = xp->fdesc.flash_size;
3189 		cmd->ResponseLen = 0;
3190 		return;
3191 	}
3192 
3193 	if (ql_setup_flash(ha) != QL_SUCCESS) {
3194 		cmd->Status = EXT_STATUS_ERR;
3195 		cmd->DetailStatus = xp->fdesc.flash_size;
3196 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3197 		    cmd->ResponseLen, xp->fdesc.flash_size);
3198 		cmd->ResponseLen = 0;
3199 	} else {
3200 		/* adjust read size to flash size */
3201 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3202 			EL(ha, "adjusting req=%xh, max=%xh\n",
3203 			    cmd->ResponseLen, xp->fdesc.flash_size);
3204 			cmd->ResponseLen = xp->fdesc.flash_size;
3205 		}
3206 
3207 		/* Get flash data. */
3208 		if (ql_flash_fcode_dump(ha,
3209 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3210 		    (size_t)(cmd->ResponseLen), mode) != 0) {
3211 			cmd->Status = EXT_STATUS_COPY_ERR;
3212 			cmd->ResponseLen = 0;
3213 			EL(ha, "failed,\n");
3214 		}
3215 	}
3216 
3217 	/* Resume I/O */
3218 	if (CFG_IST(ha, CFG_CTRL_2425)) {
3219 		ql_restart_driver(ha);
3220 	} else {
3221 		EL(ha, "isp_abort_needed for restart\n");
3222 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3223 		    DRIVER_STALL);
3224 	}
3225 
3226 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3227 }
3228 
3229 /*
3230  * ql_write_flash
3231  *	Loads flash contents.
3232  *
3233  * Input:
3234  *	ha:	adapter state pointer.
3235  *	cmd:	EXT_IOCTL cmd struct pointer.
3236  *	mode:	flags.
3237  *
3238  * Returns:
3239  *	None, request status indicated in cmd->Status.
3240  *
3241  * Context:
3242  *	Kernel context.
3243  */
3244 static void
3245 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3246 {
3247 	ql_xioctl_t	*xp = ha->xioctl;
3248 
3249 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3250 
3251 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3252 		EL(ha, "ql_stall_driver failed\n");
3253 		cmd->Status = EXT_STATUS_BUSY;
3254 		cmd->DetailStatus = xp->fdesc.flash_size;
3255 		cmd->ResponseLen = 0;
3256 		return;
3257 	}
3258 
3259 	if (ql_setup_flash(ha) != QL_SUCCESS) {
3260 		cmd->Status = EXT_STATUS_ERR;
3261 		cmd->DetailStatus = xp->fdesc.flash_size;
3262 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3263 		    cmd->RequestLen, xp->fdesc.flash_size);
3264 		cmd->ResponseLen = 0;
3265 	} else {
3266 		/* Load flash data. */
3267 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3268 			cmd->Status = EXT_STATUS_ERR;
3269 			cmd->DetailStatus =  xp->fdesc.flash_size;
3270 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3271 			    cmd->RequestLen, xp->fdesc.flash_size);
3272 		} else if (ql_flash_fcode_load(ha,
3273 		    (void *)(uintptr_t)(cmd->RequestAdr),
3274 		    (size_t)(cmd->RequestLen), mode) != 0) {
3275 			cmd->Status = EXT_STATUS_COPY_ERR;
3276 			EL(ha, "failed,\n");
3277 		}
3278 	}
3279 
3280 	/* Resume I/O */
3281 	if (CFG_IST(ha, CFG_CTRL_2425)) {
3282 		ql_restart_driver(ha);
3283 	} else {
3284 		EL(ha, "isp_abort_needed for restart\n");
3285 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3286 		    DRIVER_STALL);
3287 	}
3288 
3289 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3290 }
3291 
3292 /*
3293  * ql_diagnostic_loopback
3294  *	Performs EXT_CC_LOOPBACK Command
3295  *
3296  * Input:
3297  *	ha:	adapter state pointer.
3298  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3299  *	mode:	flags.
3300  *
3301  * Returns:
3302  *	None, request status indicated in cmd->Status.
3303  *
3304  * Context:
3305  *	Kernel context.
3306  */
3307 static void
3308 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3309 {
3310 	EXT_LOOPBACK_REQ	plbreq;
3311 	EXT_LOOPBACK_RSP	plbrsp;
3312 	ql_mbx_data_t		mr;
3313 	uint32_t		rval;
3314 	caddr_t			bp;
3315 
3316 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3317 
3318 	/* Get loop back request. */
3319 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3320 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3321 		EL(ha, "failed, ddi_copyin\n");
3322 		cmd->Status = EXT_STATUS_COPY_ERR;
3323 		cmd->ResponseLen = 0;
3324 		return;
3325 	}
3326 
3327 	/* Check transfer length fits in buffer. */
3328 	if (plbreq.BufferLength < plbreq.TransferCount &&
3329 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3330 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3331 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3332 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3333 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3334 		cmd->ResponseLen = 0;
3335 		return;
3336 	}
3337 
3338 	/* Allocate command memory. */
3339 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3340 	if (bp == NULL) {
3341 		EL(ha, "failed, kmem_zalloc\n");
3342 		cmd->Status = EXT_STATUS_NO_MEMORY;
3343 		cmd->ResponseLen = 0;
3344 		return;
3345 	}
3346 
3347 	/* Get loopback data. */
3348 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3349 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3350 		EL(ha, "failed, ddi_copyin-2\n");
3351 		kmem_free(bp, plbreq.TransferCount);
3352 		cmd->Status = EXT_STATUS_COPY_ERR;
3353 		cmd->ResponseLen = 0;
3354 		return;
3355 	}
3356 
3357 	if (DRIVER_SUSPENDED(ha) || ql_stall_driver(ha, 0) != QL_SUCCESS) {
3358 		EL(ha, "failed, LOOP_NOT_READY\n");
3359 		kmem_free(bp, plbreq.TransferCount);
3360 		cmd->Status = EXT_STATUS_BUSY;
3361 		cmd->ResponseLen = 0;
3362 		return;
3363 	}
3364 
3365 	/* Shutdown IP. */
3366 	if (ha->flags & IP_INITIALIZED) {
3367 		(void) ql_shutdown_ip(ha);
3368 	}
3369 
3370 	/* determine topology so we can send the loopback or the echo */
3371 	/* Echo is supported on 2300's only and above */
3372 
3373 	if ((ha->topology & QL_F_PORT) && ha->device_id >= 0x2300) {
3374 		QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using echo\n",
3375 		    ha->instance);
3376 		plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3377 		rval = ql_diag_echo(ha, bp, plbreq.TransferCount, 0, &mr);
3378 	} else {
3379 		plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3380 		rval = ql_diag_loopback(ha, bp, plbreq.TransferCount,
3381 		    plbreq.Options, plbreq.IterationCount, &mr);
3382 	}
3383 
3384 	ql_restart_driver(ha);
3385 
3386 	/* Restart IP if it was shutdown. */
3387 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3388 		(void) ql_initialize_ip(ha);
3389 		ql_isp_rcvbuf(ha);
3390 	}
3391 
3392 	if (rval != QL_SUCCESS) {
3393 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3394 		kmem_free(bp, plbreq.TransferCount);
3395 		cmd->Status = EXT_STATUS_MAILBOX;
3396 		cmd->DetailStatus = rval;
3397 		cmd->ResponseLen = 0;
3398 		return;
3399 	}
3400 
3401 	/* Return loopback data. */
3402 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3403 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3404 		EL(ha, "failed, ddi_copyout\n");
3405 		kmem_free(bp, plbreq.TransferCount);
3406 		cmd->Status = EXT_STATUS_COPY_ERR;
3407 		cmd->ResponseLen = 0;
3408 		return;
3409 	}
3410 	kmem_free(bp, plbreq.TransferCount);
3411 
3412 	/* Return loopback results. */
3413 	plbrsp.BufferAddress = plbreq.BufferAddress;
3414 	plbrsp.BufferLength = plbreq.TransferCount;
3415 	plbrsp.CompletionStatus = mr.mb[0];
3416 
3417 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3418 		plbrsp.CrcErrorCount = 0;
3419 		plbrsp.DisparityErrorCount = 0;
3420 		plbrsp.FrameLengthErrorCount = 0;
3421 		plbrsp.IterationCountLastError = 0;
3422 	} else {
3423 		plbrsp.CrcErrorCount = mr.mb[1];
3424 		plbrsp.DisparityErrorCount = mr.mb[2];
3425 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3426 		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
3427 	}
3428 
3429 	rval = ddi_copyout((void *)&plbrsp,
3430 	    (void *)(uintptr_t)cmd->ResponseAdr,
3431 	    sizeof (EXT_LOOPBACK_RSP), mode);
3432 	if (rval != 0) {
3433 		EL(ha, "failed, ddi_copyout-2\n");
3434 		cmd->Status = EXT_STATUS_COPY_ERR;
3435 		cmd->ResponseLen = 0;
3436 		return;
3437 	}
3438 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3439 
3440 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3441 }
3442 
3443 /*
3444  * ql_send_els_rnid
3445  *	IOCTL for extended link service RNID command.
3446  *
3447  * Input:
3448  *	ha:	adapter state pointer.
3449  *	cmd:	User space CT arguments pointer.
3450  *	mode:	flags.
3451  *
3452  * Returns:
3453  *	None, request status indicated in cmd->Status.
3454  *
3455  * Context:
3456  *	Kernel context.
3457  */
3458 static void
3459 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3460 {
3461 	EXT_RNID_REQ	tmp_rnid;
3462 	port_id_t	tmp_fcid;
3463 	caddr_t		tmp_buf, bptr;
3464 	uint32_t	copy_len;
3465 	ql_tgt_t	*tq;
3466 	EXT_RNID_DATA	rnid_data;
3467 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3468 	int		rval = 0;
3469 	uint32_t	local_hba = 0;
3470 
3471 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3472 
3473 	if (DRIVER_SUSPENDED(ha)) {
3474 		EL(ha, "failed, LOOP_NOT_READY\n");
3475 		cmd->Status = EXT_STATUS_BUSY;
3476 		cmd->ResponseLen = 0;
3477 		return;
3478 	}
3479 
3480 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3481 		/* parameter error */
3482 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3483 		    cmd->RequestLen);
3484 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3485 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3486 		cmd->ResponseLen = 0;
3487 		return;
3488 	}
3489 
3490 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3491 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3492 		EL(ha, "failed, ddi_copyin\n");
3493 		cmd->Status = EXT_STATUS_COPY_ERR;
3494 		cmd->ResponseLen = 0;
3495 		return;
3496 	}
3497 
3498 	/* Find loop ID of the device */
3499 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3500 		bptr = CFG_IST(ha, CFG_CTRL_2425) ?
3501 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3502 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3503 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3504 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3505 			local_hba = 1;
3506 		} else {
3507 			tq = ql_find_port(ha,
3508 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3509 		}
3510 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3511 		bptr = CFG_IST(ha, CFG_CTRL_2425) ?
3512 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3513 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3514 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3515 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3516 			local_hba = 1;
3517 		} else {
3518 			tq = ql_find_port(ha,
3519 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3520 		}
3521 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3522 		/*
3523 		 * Copy caller's d_id to tmp space.
3524 		 */
3525 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3526 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3527 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3528 
3529 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3530 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3531 			local_hba = 1;
3532 		} else {
3533 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3534 			    QLNT_PID);
3535 		}
3536 	}
3537 
3538 	/* Allocate memory for command. */
3539 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3540 	if (tmp_buf == NULL) {
3541 		EL(ha, "failed, kmem_zalloc\n");
3542 		cmd->Status = EXT_STATUS_NO_MEMORY;
3543 		cmd->ResponseLen = 0;
3544 		return;
3545 	}
3546 
3547 	if (local_hba) {
3548 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3549 		if (rval != QL_SUCCESS) {
3550 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3551 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3552 			cmd->Status = EXT_STATUS_ERR;
3553 			cmd->ResponseLen = 0;
3554 			return;
3555 		}
3556 
3557 		/* Save gotten RNID data. */
3558 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3559 
3560 		/* Now build the Send RNID response */
3561 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3562 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3563 		tmp_buf[2] = 0;
3564 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3565 
3566 		if (CFG_IST(ha, CFG_CTRL_2425)) {
3567 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3568 			    EXT_DEF_WWN_NAME_SIZE);
3569 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3570 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3571 			    EXT_DEF_WWN_NAME_SIZE);
3572 		} else {
3573 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3574 			    EXT_DEF_WWN_NAME_SIZE);
3575 			bcopy(ha->init_ctrl_blk.cb.node_name,
3576 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3577 			    EXT_DEF_WWN_NAME_SIZE);
3578 		}
3579 
3580 		bcopy((uint8_t *)&rnid_data,
3581 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3582 		    sizeof (EXT_RNID_DATA));
3583 	} else {
3584 		if (tq == NULL) {
3585 			/* no matching device */
3586 			EL(ha, "failed, device not found\n");
3587 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3588 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3589 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3590 			cmd->ResponseLen = 0;
3591 			return;
3592 		}
3593 
3594 		/* Send command */
3595 		rval = ql_send_rnid_els(ha, tq->loop_id,
3596 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3597 		if (rval != QL_SUCCESS) {
3598 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3599 			    rval, tq->loop_id);
3600 			while (LOOP_NOT_READY(ha)) {
3601 				ql_delay(ha, 100000);
3602 				if (loop_ready_wait-- == 0) {
3603 					EL(ha, "failed, loop not ready\n");
3604 					cmd->Status = EXT_STATUS_ERR;
3605 					cmd->ResponseLen = 0;
3606 				}
3607 			}
3608 			rval = ql_send_rnid_els(ha, tq->loop_id,
3609 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3610 			    tmp_buf);
3611 			if (rval != QL_SUCCESS) {
3612 				/* error */
3613 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3614 				    rval, tq->loop_id);
3615 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3616 				cmd->Status = EXT_STATUS_ERR;
3617 				cmd->ResponseLen = 0;
3618 				return;
3619 			}
3620 		}
3621 	}
3622 
3623 	/* Copy the response */
3624 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3625 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3626 
3627 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3628 	    copy_len, mode) != copy_len) {
3629 		cmd->Status = EXT_STATUS_COPY_ERR;
3630 		EL(ha, "failed, ddi_copyout\n");
3631 	} else {
3632 		cmd->ResponseLen = copy_len;
3633 		if (copy_len < SEND_RNID_RSP_SIZE) {
3634 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3635 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3636 
3637 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3638 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3639 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3640 		} else {
3641 			cmd->Status = EXT_STATUS_OK;
3642 			QL_PRINT_9(CE_CONT, "(%d): exiting\n",
3643 			    ha->instance);
3644 		}
3645 	}
3646 
3647 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3648 }
3649 
3650 /*
3651  * ql_set_host_data
3652  *	Process IOCTL subcommand to set host/adapter related data.
3653  *
3654  * Input:
3655  *	ha:	adapter state pointer.
3656  *	cmd:	User space CT arguments pointer.
3657  *	mode:	flags.
3658  *
3659  * Returns:
3660  *	None, request status indicated in cmd->Status.
3661  *
3662  * Context:
3663  *	Kernel context.
3664  */
3665 static void
3666 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3667 {
3668 	QL_PRINT_9(CE_CONT, "(%d): entered, SubCode=%d\n", ha->instance,
3669 	    cmd->SubCode);
3670 
3671 	/*
3672 	 * case off on command subcode
3673 	 */
3674 	switch (cmd->SubCode) {
3675 	case EXT_SC_SET_RNID:
3676 		ql_set_rnid_parameters(ha, cmd, mode);
3677 		break;
3678 	case EXT_SC_RST_STATISTICS:
3679 		(void) ql_reset_statistics(ha, cmd);
3680 		break;
3681 	case EXT_SC_SET_BEACON_STATE:
3682 		ql_set_led_state(ha, cmd, mode);
3683 		break;
3684 	case EXT_SC_SET_PARMS:
3685 	case EXT_SC_SET_BUS_MODE:
3686 	case EXT_SC_SET_DR_DUMP_BUF:
3687 	case EXT_SC_SET_RISC_CODE:
3688 	case EXT_SC_SET_FLASH_RAM:
3689 	case EXT_SC_SET_LUN_BITMASK:
3690 	case EXT_SC_SET_RETRY_CNT:
3691 	case EXT_SC_SET_RTIN:
3692 	case EXT_SC_SET_FC_LUN_BITMASK:
3693 	case EXT_SC_ADD_TARGET_DEVICE:
3694 	case EXT_SC_SWAP_TARGET_DEVICE:
3695 	case EXT_SC_SET_SEL_TIMEOUT:
3696 	default:
3697 		/* function not supported. */
3698 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3699 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3700 		break;
3701 	}
3702 
3703 	if (cmd->Status != EXT_STATUS_OK) {
3704 		EL(ha, "failed, Status=%d\n", cmd->Status);
3705 	} else {
3706 		/*EMPTY*/
3707 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3708 	}
3709 }
3710 
3711 /*
3712  * ql_get_host_data
3713  *	Performs EXT_CC_GET_DATA subcommands.
3714  *
3715  * Input:
3716  *	ha:	adapter state pointer.
3717  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3718  *	mode:	flags.
3719  *
3720  * Returns:
3721  *	None, request status indicated in cmd->Status.
3722  *
3723  * Context:
3724  *	Kernel context.
3725  */
3726 static void
3727 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3728 {
3729 	int	out_size = 0;
3730 
3731 	QL_PRINT_9(CE_CONT, "(%d): entered, SubCode=%d\n", ha->instance,
3732 	    cmd->SubCode);
3733 
3734 	/* case off on command subcode */
3735 	switch (cmd->SubCode) {
3736 	case EXT_SC_GET_STATISTICS:
3737 		out_size = sizeof (EXT_HBA_PORT_STAT);
3738 		break;
3739 	case EXT_SC_GET_FC_STATISTICS:
3740 		out_size = sizeof (EXT_HBA_PORT_STAT);
3741 		break;
3742 	case EXT_SC_GET_PORT_SUMMARY:
3743 		out_size = sizeof (EXT_DEVICEDATA);
3744 		break;
3745 	case EXT_SC_GET_RNID:
3746 		out_size = sizeof (EXT_RNID_DATA);
3747 		break;
3748 	case EXT_SC_GET_TARGET_ID:
3749 		out_size = sizeof (EXT_DEST_ADDR);
3750 		break;
3751 	case EXT_SC_GET_BEACON_STATE:
3752 		out_size = sizeof (EXT_BEACON_CONTROL);
3753 		break;
3754 	case EXT_SC_GET_FC4_STATISTICS:
3755 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3756 		break;
3757 	case EXT_SC_GET_SCSI_ADDR:
3758 	case EXT_SC_GET_ERR_DETECTIONS:
3759 	case EXT_SC_GET_BUS_MODE:
3760 	case EXT_SC_GET_DR_DUMP_BUF:
3761 	case EXT_SC_GET_RISC_CODE:
3762 	case EXT_SC_GET_FLASH_RAM:
3763 	case EXT_SC_GET_LINK_STATUS:
3764 	case EXT_SC_GET_LOOP_ID:
3765 	case EXT_SC_GET_LUN_BITMASK:
3766 	case EXT_SC_GET_PORT_DATABASE:
3767 	case EXT_SC_GET_PORT_DATABASE_MEM:
3768 	case EXT_SC_GET_POSITION_MAP:
3769 	case EXT_SC_GET_RETRY_CNT:
3770 	case EXT_SC_GET_RTIN:
3771 	case EXT_SC_GET_FC_LUN_BITMASK:
3772 	case EXT_SC_GET_SEL_TIMEOUT:
3773 	default:
3774 		/* function not supported. */
3775 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3776 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3777 		cmd->ResponseLen = 0;
3778 		return;
3779 	}
3780 
3781 	if (cmd->ResponseLen < out_size) {
3782 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3783 		cmd->DetailStatus = out_size;
3784 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3785 		    cmd->ResponseLen, out_size);
3786 		cmd->ResponseLen = 0;
3787 		return;
3788 	}
3789 
3790 	switch (cmd->SubCode) {
3791 	case EXT_SC_GET_RNID:
3792 		ql_get_rnid_parameters(ha, cmd, mode);
3793 		break;
3794 	case EXT_SC_GET_STATISTICS:
3795 		ql_get_statistics(ha, cmd, mode);
3796 		break;
3797 	case EXT_SC_GET_FC_STATISTICS:
3798 		ql_get_statistics_fc(ha, cmd, mode);
3799 		break;
3800 	case EXT_SC_GET_FC4_STATISTICS:
3801 		ql_get_statistics_fc4(ha, cmd, mode);
3802 		break;
3803 	case EXT_SC_GET_PORT_SUMMARY:
3804 		ql_get_port_summary(ha, cmd, mode);
3805 		break;
3806 	case EXT_SC_GET_TARGET_ID:
3807 		ql_get_target_id(ha, cmd, mode);
3808 		break;
3809 	case EXT_SC_GET_BEACON_STATE:
3810 		ql_get_led_state(ha, cmd, mode);
3811 		break;
3812 	}
3813 
3814 	if (cmd->Status != EXT_STATUS_OK) {
3815 		EL(ha, "failed, Status=%d\n", cmd->Status);
3816 	} else {
3817 		/*EMPTY*/
3818 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3819 	}
3820 }
3821 
3822 /* ******************************************************************** */
3823 /*			Helper Functions				*/
3824 /* ******************************************************************** */
3825 
3826 /*
3827  * ql_lun_count
3828  *	Get numbers of LUNS on target.
3829  *
3830  * Input:
3831  *	ha:	adapter state pointer.
3832  *	q:	device queue pointer.
3833  *
3834  * Returns:
3835  *	Number of LUNs.
3836  *
3837  * Context:
3838  *	Kernel context.
3839  */
3840 static int
3841 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3842 {
3843 	int	cnt;
3844 
3845 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3846 
3847 	/* Bypass LUNs that failed. */
3848 	cnt = ql_report_lun(ha, tq);
3849 	if (cnt == 0) {
3850 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3851 	}
3852 
3853 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3854 
3855 	return (cnt);
3856 }
3857 
3858 /*
3859  * ql_report_lun
3860  *	Get numbers of LUNS using report LUN command.
3861  *
3862  * Input:
3863  *	ha:	adapter state pointer.
3864  *	q:	target queue pointer.
3865  *
3866  * Returns:
3867  *	Number of LUNs.
3868  *
3869  * Context:
3870  *	Kernel context.
3871  */
static int
ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int			rval;
	uint8_t			retries;
	ql_mbx_iocb_t		*pkt;
	ql_rpt_lun_lst_t	*rpt;
	dma_mem_t		dma_mem;
	uint32_t		pkt_size, cnt;
	uint16_t		comp_status;
	uint8_t			scsi_status_h, scsi_status_l, *reqs;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Cannot issue commands while the loop is down. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		return (0);
	}

	/* One allocation holds the IOCB followed by the LUN list. */
	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (0);
	}
	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		return (0);
	}

	/* Issue REPORT LUNS, retrying up to 4 times on failure. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx parts: type-7 command IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
			/* CDB bytes 6-9: allocation length, big-endian. */
			pkt->cmd24.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			/* 24xx firmware wants the CDB byte-swapped by word. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.total_byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd24.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Older parts, 64-bit DMA: type-3 command IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			/* Extended fw interface uses 16-bit loop IDs. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
			/* CDB bytes 6-9: allocation length, big-endian. */
			pkt->cmd3.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else {
			/* Older parts, 32-bit DMA: type-2 command IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			/* Extended fw interface uses 16-bit loop IDs. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
			/* CDB bytes 6-9: allocation length, big-endian. */
			pkt->cmd.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		}

		/* Issue the IOCB and wait for the status entry. */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract completion/SCSI status from the format in use. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* Mask to the error bits defined for 24xx entries. */
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense bytes follow any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			/* Mask to the error bits defined for 2x00 entries. */
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		/*
		 * NOTE(review): sts.entry_status is read for both formats;
		 * presumably sts and sts24 overlay the field at the same
		 * offset in the union — confirm against ql_iocb.h.
		 */
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			/* Device underrun, treat as OK. */
			if (rval == QL_SUCCESS &&
			    comp_status == CS_DATA_UNDERRUN &&
			    scsi_status_h & FCP_RESID_UNDER) {
				break;
			}

			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* Port gone or timed out: give up now. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			} else if (rval == QL_ABORTED) {
				break;
			}

			/* Log sense data for CHECK CONDITION, then retry. */
			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Command completed cleanly. */
			break;
		}
		/* Clear the packet before rebuilding it for the retry. */
		bzero((caddr_t)pkt, pkt_size);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
		rval = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
		/* LUN list length is big-endian bytes; 8 bytes per LUN. */
		rval = (int)(BE_32(rpt->hdr.len) / 8);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
4106 
4107 /*
4108  * ql_inq_scan
4109  *	Get numbers of LUNS using inquiry command.
4110  *
4111  * Input:
4112  *	ha:		adapter state pointer.
4113  *	tq:		target queue pointer.
4114  *	count:		scan for the number of existing LUNs.
4115  *
4116  * Returns:
4117  *	Number of LUNs.
4118  *
4119  * Context:
4120  *	Kernel context.
4121  */
4122 static int
4123 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4124 {
4125 	int		lun, cnt, rval;
4126 	ql_mbx_iocb_t	*pkt;
4127 	uint8_t		*inq;
4128 	uint32_t	pkt_size;
4129 
4130 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
4131 
4132 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4133 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4134 	if (pkt == NULL) {
4135 		EL(ha, "failed, kmem_zalloc\n");
4136 		return (0);
4137 	}
4138 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4139 
4140 	cnt = 0;
4141 	for (lun = 0; lun < MAX_LUNS; lun++) {
4142 
4143 		if (DRIVER_SUSPENDED(ha)) {
4144 			rval = QL_LOOP_DOWN;
4145 			cnt = 0;
4146 			break;
4147 		}
4148 
4149 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4150 		if (rval == QL_SUCCESS) {
4151 			switch (*inq) {
4152 			case DTYPE_DIRECT:
4153 			case DTYPE_PROCESSOR:	/* Appliance. */
4154 			case DTYPE_WORM:
4155 			case DTYPE_RODIRECT:
4156 			case DTYPE_SCANNER:
4157 			case DTYPE_OPTICAL:
4158 			case DTYPE_CHANGER:
4159 			case DTYPE_ESI:
4160 				cnt++;
4161 				break;
4162 			case DTYPE_SEQUENTIAL:
4163 				cnt++;
4164 				tq->flags |= TQF_TAPE_DEVICE;
4165 				break;
4166 			default:
4167 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4168 				    "unsupported device id=%xh, lun=%d, "
4169 				    "type=%xh\n", ha->instance, tq->loop_id,
4170 				    lun, *inq);
4171 				break;
4172 			}
4173 
4174 			if (*inq == DTYPE_ESI || cnt >= count) {
4175 				break;
4176 			}
4177 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4178 			cnt = 0;
4179 			break;
4180 		}
4181 	}
4182 
4183 	kmem_free(pkt, pkt_size);
4184 
4185 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
4186 
4187 	return (cnt);
4188 }
4189 
4190 /*
4191  * ql_inq
4192  *	Issue inquiry command.
4193  *
4194  * Input:
4195  *	ha:		adapter state pointer.
4196  *	tq:		target queue pointer.
4197  *	lun:		LUN number.
4198  *	pkt:		command and buffer pointer.
4199  *	inq_len:	amount of inquiry data.
4200  *
4201  * Returns:
4202  *	ql local function return status code.
4203  *
4204  * Context:
4205  *	Kernel context.
4206  */
static int
ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
    uint8_t inq_len)
{
	dma_mem_t	dma_mem;
	int		rval, retries;
	uint32_t	pkt_size, cnt;
	uint16_t	comp_status;
	uint8_t		scsi_status_h, scsi_status_l, *reqs;
	caddr_t		inq_data;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, loop down\n");
		return (QL_FUNCTION_TIMEOUT);
	}

	/*
	 * Caller's pkt buffer holds the IOCB followed by inq_len bytes
	 * reserved for the returned inquiry data (see ql_inq_scan).
	 */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
	bzero((caddr_t)pkt, pkt_size);

	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		/*
		 * NOTE(review): returns 0 rather than a QL_* status code
		 * here; callers treat anything != QL_SUCCESS as failure,
		 * but an explicit status would be clearer -- confirm.
		 */
		return (0);
	}

	/* Build and issue the command IOCB; retry transient failures. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx: command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number */
			pkt->cmd24.fcp_lun[2] = LSB(lun);
			pkt->cmd24.fcp_lun[3] = MSB(lun);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd24.scsi_cdb[4] = inq_len;
			/* Firmware expects the CDB as 32-bit words. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			pkt->cmd24.total_byte_count = LE_32(inq_len);

			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_length = LE_32(inq_len);
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 2x00 with 64-bit DMA: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			cnt = CMD_TYPE_3_DATA_SEGMENTS;

			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(lun);
			pkt->cmd3.lun_h = MSB(lun);
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd3.scsi_cdb[4] = inq_len;
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.byte_count = LE_32(inq_len);
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length = LE_32(inq_len);
		} else {
			/* 2x00 with 32-bit DMA: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			cnt = CMD_TYPE_2_DATA_SEGMENTS;

			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(lun);
			pkt->cmd.lun_h = MSB(lun);
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd.scsi_cdb[4] = inq_len;
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.byte_count = LE_32(inq_len);
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length = LE_32(inq_len);
		}

/*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming IOCB DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Decode the completion status from the returned IOCB. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* Mask off reserved entry_status bits (24xx). */
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			/* Mask off reserved entry_status bits (2x00). */
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* Port-gone conditions are not retried. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Success: stop retrying. */
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
4402 
4403 /*
4404  * ql_get_buffer_data
 *	Copies data from user space to kernel buffer.
4406  *
4407  * Input:
4408  *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
4410  *	size:	Amount of data.
4411  *	mode:	flags.
4412  *
4413  * Returns:
4414  *	Returns number of bytes transferred.
4415  *
4416  * Context:
4417  *	Kernel context.
4418  */
4419 static uint32_t
4420 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4421 {
4422 	uint32_t	cnt;
4423 
4424 	for (cnt = 0; cnt < size; cnt++) {
4425 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4426 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4427 			break;
4428 		}
4429 	}
4430 
4431 	return (cnt);
4432 }
4433 
4434 /*
4435  * ql_send_buffer_data
 *	Copies data from kernel buffer to user space.
4437  *
4438  * Input:
 *	src:	Kernel source buffer address.
4440  *	dst:	User destination buffer address.
4441  *	size:	Amount of data.
4442  *	mode:	flags.
4443  *
4444  * Returns:
4445  *	Returns number of bytes transferred.
4446  *
4447  * Context:
4448  *	Kernel context.
4449  */
4450 static uint32_t
4451 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4452 {
4453 	uint32_t	cnt;
4454 
4455 	for (cnt = 0; cnt < size; cnt++) {
4456 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4457 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4458 			break;
4459 		}
4460 	}
4461 
4462 	return (cnt);
4463 }
4464 
4465 /*
4466  * ql_find_port
4467  *	Locates device queue.
4468  *
4469  * Input:
4470  *	ha:	adapter state pointer.
4471  *	name:	device port name.
4472  *
4473  * Returns:
4474  *	Returns target queue pointer.
4475  *
4476  * Context:
4477  *	Kernel context.
4478  */
4479 static ql_tgt_t *
4480 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4481 {
4482 	ql_link_t	*link;
4483 	ql_tgt_t	*tq;
4484 	uint16_t	index;
4485 
4486 	/* Scan port list for requested target */
4487 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4488 		for (link = ha->dev[index].first; link != NULL;
4489 		    link = link->next) {
4490 			tq = link->base_address;
4491 
4492 			switch (type) {
4493 			case QLNT_LOOP_ID:
4494 				if (bcmp(name, &tq->loop_id,
4495 				    sizeof (uint16_t)) == 0) {
4496 					return (tq);
4497 				}
4498 				break;
4499 			case QLNT_PORT:
4500 				if (bcmp(name, tq->port_name, 8) == 0) {
4501 					return (tq);
4502 				}
4503 				break;
4504 			case QLNT_NODE:
4505 				if (bcmp(name, tq->node_name, 8) == 0) {
4506 					return (tq);
4507 				}
4508 				break;
4509 			case QLNT_PID:
4510 				if (bcmp(name, tq->d_id.r.d_id,
4511 				    sizeof (tq->d_id.r.d_id)) == 0) {
4512 					return (tq);
4513 				}
4514 				break;
4515 			default:
4516 				EL(ha, "failed, invalid type=%d\n",  type);
4517 				return (NULL);
4518 			}
4519 		}
4520 	}
4521 
4522 	return (NULL);
4523 }
4524 
4525 /*
4526  * ql_24xx_flash_desc
4527  *	Get flash descriptor table.
4528  *
4529  * Input:
4530  *	ha:		adapter state pointer.
4531  *
4532  * Returns:
4533  *	ql local function return status code.
4534  *
4535  * Context:
4536  *	Kernel context.
4537  */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint8_t		w8;
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Read the raw descriptor table out of flash. */
	rval = ql_dump_fcode(ha, (uint8_t *)&xp->fdesc,
	    sizeof (flash_desc_t), FLASH_2500_DESCRIPTOR_TABLE << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		bzero(&xp->fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	QL_DUMP_9(&xp->fdesc, 8, sizeof (flash_desc_t));

	/*
	 * Sum the table as little-endian 16-bit words; a valid table
	 * checksums to zero.
	 */
	chksum = 0;
	data = 0;
	bp = (uint16_t *)&xp->fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp;
		LITTLE_ENDIAN_16(&data);
		chksum += data;
		bp++;
	}

	/* Byte-swap the multi-byte fields into host order. */
	LITTLE_ENDIAN_32(&xp->fdesc.flash_valid);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_version);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_len);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_checksum);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_manuf);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_id);
	LITTLE_ENDIAN_32(&xp->fdesc.block_size);
	LITTLE_ENDIAN_32(&xp->fdesc.alt_block_size);
	LITTLE_ENDIAN_32(&xp->fdesc.flash_size);
	LITTLE_ENDIAN_32(&xp->fdesc.write_enable_data);
	LITTLE_ENDIAN_32(&xp->fdesc.read_timeout);

	/* flash size in desc table is in 1024 bytes */
	xp->fdesc.flash_size = xp->fdesc.flash_size * 0x400;

	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_valid=%xh\n", ha->instance,
	    xp->fdesc.flash_valid);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_version=%xh\n", ha->instance,
	    xp->fdesc.flash_version);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_len=%xh\n", ha->instance,
	    xp->fdesc.flash_len);

	/* Temporarily NUL-terminate the model string for printing. */
	w8 = xp->fdesc.flash_model[17];
	xp->fdesc.flash_model[17] = 0;
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_model=%s\n", ha->instance,
	    xp->fdesc.flash_model);
	xp->fdesc.flash_model[17] = w8;

	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_checksum=%xh\n",
	    ha->instance, xp->fdesc.flash_checksum);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_unused=%xh\n", ha->instance,
	    xp->fdesc.flash_unused);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_manuf=%xh\n", ha->instance,
	    xp->fdesc.flash_manuf);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_id=%xh\n", ha->instance,
	    xp->fdesc.flash_id);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_flag=%xh\n", ha->instance,
	    xp->fdesc.flash_flag);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.erase_cmd=%xh\n", ha->instance,
	    xp->fdesc.erase_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.alt_erase_cmd=%xh\n", ha->instance,
	    xp->fdesc.alt_erase_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_enable_cmd=%xh\n",
	    ha->instance, xp->fdesc.write_enable_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_enable_bits=%xh\n",
	    ha->instance, xp->fdesc.write_enable_bits);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_statusreg_cmd=%xh\n",
	    ha->instance, xp->fdesc.write_statusreg_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.unprotect_sector_cmd=%xh\n",
	    ha->instance, xp->fdesc.unprotect_sector_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.read_manuf_cmd=%xh\n",
	    ha->instance, xp->fdesc.read_manuf_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.block_size=%xh\n", ha->instance,
	    xp->fdesc.block_size);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.alt_block_size=%xh\n",
	    ha->instance, xp->fdesc.alt_block_size);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_size=%xh\n", ha->instance,
	    xp->fdesc.flash_size);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_enable_data=%xh\n",
	    ha->instance, xp->fdesc.write_enable_data);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.readid_address_len=%xh\n",
	    ha->instance, xp->fdesc.readid_address_len);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_disable_bits=%xh\n",
	    ha->instance, xp->fdesc.write_disable_bits);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.read_device_id_len=%xh\n",
	    ha->instance, xp->fdesc.read_device_id_len);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.chip_erase_cmd=%xh\n",
	    ha->instance, xp->fdesc.chip_erase_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.read_timeout=%xh\n", ha->instance,
	    xp->fdesc.read_timeout);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.protect_sector_cmd=%xh\n",
	    ha->instance, xp->fdesc.protect_sector_cmd);

	/* Reject the table on bad checksum, magic, or version. */
	if (chksum != 0 || xp->fdesc.flash_valid != FLASH_DESC_VAILD ||
	    xp->fdesc.flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		bzero(&xp->fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (QL_SUCCESS);
}
4653 /*
4654  * ql_setup_flash
4655  *	Gets the manufacturer and id number of the flash chip, and
4656  *	sets up the size parameter.
4657  *
4658  * Input:
4659  *	ha:	adapter state pointer.
4660  *
4661  * Returns:
4662  *	int:	ql local function return status code.
4663  *
4664  * Context:
4665  *	Kernel context.
4666  */
int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already probed; flash parameters are cached in xp->fdesc. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_fcode.
		 */
		ha->xioctl->fdesc.flash_size = 0x200000;
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		/* No valid descriptor table; fall back to JEDEC probe. */
		(void) ql_24xx_flash_id(ha);

	} else if (CFG_IST(ha, CFG_CTRL_2425)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/*
		 * Legacy parts: issue the JEDEC autoselect command
		 * sequence directly to read manufacturer and device id.
		 */
		ql_flash_enable(ha);

		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		/* SBUS cards use different unlock addresses. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Reset the chip out of autoselect mode. */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/*
	 * Map the probed manufacturer/device id to chip size and any
	 * chip-specific command overrides.
	 */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* flash_len encodes the size as a power of two. */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			/* This part uses smaller sectors/erase command. */
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			/* Atmel parts need sector protect handling. */
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}
4903 
4904 /*
4905  * ql_flash_fcode_load
4906  *	Loads fcode data into flash from application.
4907  *
4908  * Input:
4909  *	ha:	adapter state pointer.
4910  *	bp:	user buffer address.
4911  *	size:	user buffer size.
4912  *	mode:	flags
4913  *
4914  * Returns:
4915  *
4916  * Context:
4917  *	Kernel context.
4918  */
4919 static int
4920 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
4921     int mode)
4922 {
4923 	uint8_t		*bfp;
4924 	ql_xioctl_t	*xp = ha->xioctl;
4925 	int		rval = 0;
4926 
4927 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
4928 
4929 	if (bsize > xp->fdesc.flash_size) {
4930 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
4931 		    xp->fdesc.flash_size);
4932 		return (ENOMEM);
4933 	}
4934 
4935 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
4936 		EL(ha, "failed, kmem_zalloc\n");
4937 		rval = ENOMEM;
4938 	} else  {
4939 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
4940 			EL(ha, "failed, ddi_copyin\n");
4941 			rval = EFAULT;
4942 		} else if (ql_load_fcode(ha, bfp, bsize) != QL_SUCCESS) {
4943 			EL(ha, "failed, load_fcode\n");
4944 			rval = EFAULT;
4945 		} else {
4946 			/* update the fcache */
4947 			ql_update_fcache(ha, bfp, bsize);
4948 			rval = 0;
4949 		}
4950 		kmem_free(bfp, bsize);
4951 	}
4952 
4953 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
4954 
4955 	return (rval);
4956 }
4957 
4958 /*
4959  * ql_load_fcode
4960  *	Loads fcode in to flash.
4961  *
4962  * Input:
4963  *	ha:	adapter state pointer.
4964  *	dp:	data pointer.
4965  *	size:	data length.
4966  *
4967  * Returns:
4968  *	ql local function return status code.
4969  *
4970  * Context:
4971  *	Kernel context.
4972  */
static int
ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;

	/* 24xx/25xx parts are programmed through the serial flash path. */
	if (CFG_IST(ha, CFG_CTRL_2425)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * sbus has an additional check to make
		 * sure they don't brick the HBA.
		 */
		if (dp[0] != 0xf1) {
			EL(ha, "failed, incorrect fcode for sbus\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, 0);

	if (rval == QL_SUCCESS) {
		/* Write fcode data to flash, one byte at a time. */
		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				drv_usecwait(1);
			}
			rval = ql_program_flash_address(ha, cnt, *dp++);
			if (rval != QL_SUCCESS)
				break;
		}
	}

	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
	}
	return (rval);
}
5029 
5030 /*
 * ql_flash_fcode_dump
5032  *	Dumps FLASH to application.
5033  *
5034  * Input:
5035  *	ha:	adapter state pointer.
5036  *	bp:	user buffer address.
5037  *	bsize:	user buffer size
5038  *	mode:	flags
5039  *
5040  * Returns:
5041  *
5042  * Context:
5043  *	Kernel context.
5044  */
5045 static int
5046 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5047     int mode)
5048 {
5049 	uint8_t		*bfp;
5050 	int		rval;
5051 	ql_xioctl_t	*xp = ha->xioctl;
5052 
5053 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5054 
5055 	/* adjust max read size to flash size */
5056 	if (bsize > xp->fdesc.flash_size) {
5057 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5058 		    xp->fdesc.flash_size);
5059 		bsize = xp->fdesc.flash_size;
5060 	}
5061 
5062 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5063 		EL(ha, "failed, kmem_zalloc\n");
5064 		rval = ENOMEM;
5065 	} else {
5066 		/* Dump Flash fcode. */
5067 		rval = ql_dump_fcode(ha, bfp, bsize, 0);
5068 
5069 		if (rval != QL_SUCCESS) {
5070 			EL(ha, "failed, dump_fcode = %x\n", rval);
5071 			rval = EFAULT;
5072 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5073 			EL(ha, "failed, ddi_copyout\n");
5074 			rval = EFAULT;
5075 		} else {
5076 			rval = 0;
5077 		}
5078 		kmem_free(bfp, bsize);
5079 	}
5080 
5081 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5082 
5083 	return (rval);
5084 }
5085 
5086 /*
5087  * ql_dump_fcode
5088  *	Dumps fcode from flash.
5089  *
5090  * Input:
5091  *	ha:		adapter state pointer.
5092  *	dp:		data pointer.
5093  *	size:		data length.
5094  *	startpos:	starting position in flash.
5095  *			(start position must be 4 byte aligned)
5096  *
5097  * Returns:
5098  *	ql local function return status code.
5099  *
5100  * Context:
5101  *	Kernel context.
5102  *
5103  */
int
ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
    uint32_t startpos)
{
	uint32_t	cnt, data, addr;
	int		rval = QL_SUCCESS;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* make sure startpos+size doesn't exceed flash */
	if (size + startpos > ha->xioctl->fdesc.flash_size) {
		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
		    size, startpos, ha->xioctl->fdesc.flash_size);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	if (CFG_IST(ha, CFG_CTRL_2425)) {

		/* check start addr is 32 bit aligned for 24xx */
		if ((startpos & 0x3) != 0) {
			EL(ha, "incorrect buffer size alignment\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}

		/* adjust 24xx start addr for 32 bit words */
		/* addr is only used on the 24xx/25xx path below. */
		addr = startpos / 4 | FLASH_DATA_ADDR;
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	/* 24xx/25xx serial flash needs no enable/disable sequence. */
	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
		ql_flash_enable(ha);
	}

	/* Read fcode data from flash. */
	/* size becomes the end offset; cnt walks startpos..size. */
	cnt = startpos;
	size += startpos;
	while (cnt < size) {
		/* Allow other system activity. */
		if (cnt % 0x1000 == 0) {
			drv_usecwait(1);
		}
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx reads one little-endian 32-bit word. */
			rval = ql_24xx_read_flash(ha, addr++, &data);
			if (rval != QL_SUCCESS) {
				break;
			}
			*dp++ = LSB(LSW(data));
			*dp++ = MSB(LSW(data));
			*dp++ = LSB(MSW(data));
			*dp++ = MSB(MSW(data));
			cnt += 4;
		} else {
			/* Legacy parts read one byte at a time. */
			*dp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
			cnt++;
		}
	}

	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
		ql_flash_disable(ha);
	}

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5177 
5178 /*
5179  * ql_program_flash_address
5180  *	Program flash address.
5181  *
5182  * Input:
5183  *	ha:	adapter state pointer.
5184  *	addr:	flash byte address.
5185  *	data:	data to be written to flash.
5186  *
5187  * Returns:
5188  *	ql local function return status code.
5189  *
5190  * Context:
5191  *	Kernel context.
5192  */
5193 static int
5194 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5195     uint8_t data)
5196 {
5197 	int	rval;
5198 
5199 	/* Write Program Command Sequence */
5200 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5201 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5202 		ql_write_flash_byte(ha, addr, data);
5203 	} else {
5204 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5205 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5206 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5207 		ql_write_flash_byte(ha, addr, data);
5208 	}
5209 
5210 	/* Wait for write to complete. */
5211 	rval = ql_poll_flash(ha, addr, data);
5212 
5213 	if (rval != QL_SUCCESS) {
5214 		EL(ha, "failed, rval=%xh\n", rval);
5215 	}
5216 	return (rval);
5217 }
5218 
5219 /*
5220  * ql_set_rnid_parameters
5221  *	Set RNID parameters.
5222  *
5223  * Input:
5224  *	ha:	adapter state pointer.
5225  *	cmd:	User space CT arguments pointer.
5226  *	mode:	flags.
5227  */
5228 static void
5229 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5230 {
5231 	EXT_SET_RNID_REQ	tmp_set;
5232 	EXT_RNID_DATA		*tmp_buf;
5233 	int			rval = 0;
5234 
5235 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5236 
5237 	if (DRIVER_SUSPENDED(ha)) {
5238 		EL(ha, "failed, LOOP_NOT_READY\n");
5239 		cmd->Status = EXT_STATUS_BUSY;
5240 		cmd->ResponseLen = 0;
5241 		return;
5242 	}
5243 
5244 	cmd->ResponseLen = 0; /* NO response to caller. */
5245 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5246 		/* parameter error */
5247 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5248 		    cmd->RequestLen);
5249 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5250 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5251 		cmd->ResponseLen = 0;
5252 		return;
5253 	}
5254 
5255 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5256 	    cmd->RequestLen, mode);
5257 	if (rval != 0) {
5258 		EL(ha, "failed, ddi_copyin\n");
5259 		cmd->Status = EXT_STATUS_COPY_ERR;
5260 		cmd->ResponseLen = 0;
5261 		return;
5262 	}
5263 
5264 	/* Allocate memory for command. */
5265 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5266 	if (tmp_buf == NULL) {
5267 		EL(ha, "failed, kmem_zalloc\n");
5268 		cmd->Status = EXT_STATUS_NO_MEMORY;
5269 		cmd->ResponseLen = 0;
5270 		return;
5271 	}
5272 
5273 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5274 	    (caddr_t)tmp_buf);
5275 	if (rval != QL_SUCCESS) {
5276 		/* error */
5277 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5278 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5279 		cmd->Status = EXT_STATUS_ERR;
5280 		cmd->ResponseLen = 0;
5281 		return;
5282 	}
5283 
5284 	/* Now set the requested params. */
5285 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5286 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5287 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5288 
5289 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5290 	    (caddr_t)tmp_buf);
5291 	if (rval != QL_SUCCESS) {
5292 		/* error */
5293 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5294 		cmd->Status = EXT_STATUS_ERR;
5295 		cmd->ResponseLen = 0;
5296 	}
5297 
5298 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5299 
5300 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5301 }
5302 
5303 /*
5304  * ql_get_rnid_parameters
5305  *	Get RNID parameters.
5306  *
5307  * Input:
5308  *	ha:	adapter state pointer.
5309  *	cmd:	User space CT arguments pointer.
5310  *	mode:	flags.
5311  */
5312 static void
5313 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5314 {
5315 	EXT_RNID_DATA	*tmp_buf;
5316 	uint32_t	rval;
5317 
5318 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5319 
5320 	if (DRIVER_SUSPENDED(ha)) {
5321 		EL(ha, "failed, LOOP_NOT_READY\n");
5322 		cmd->Status = EXT_STATUS_BUSY;
5323 		cmd->ResponseLen = 0;
5324 		return;
5325 	}
5326 
5327 	/* Allocate memory for command. */
5328 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5329 	if (tmp_buf == NULL) {
5330 		EL(ha, "failed, kmem_zalloc\n");
5331 		cmd->Status = EXT_STATUS_NO_MEMORY;
5332 		cmd->ResponseLen = 0;
5333 		return;
5334 	}
5335 
5336 	/* Send command */
5337 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5338 	    (caddr_t)tmp_buf);
5339 	if (rval != QL_SUCCESS) {
5340 		/* error */
5341 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5342 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5343 		cmd->Status = EXT_STATUS_ERR;
5344 		cmd->ResponseLen = 0;
5345 		return;
5346 	}
5347 
5348 	/* Copy the response */
5349 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5350 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5351 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5352 		EL(ha, "failed, ddi_copyout\n");
5353 		cmd->Status = EXT_STATUS_COPY_ERR;
5354 		cmd->ResponseLen = 0;
5355 	} else {
5356 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5357 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5358 	}
5359 
5360 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5361 }
5362 
5363 /*
5364  * ql_reset_statistics
5365  *	Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5366  *
5367  * Input:
5368  *	ha:	adapter state pointer.
5369  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5370  *
5371  * Returns:
5372  *	None, request status indicated in cmd->Status.
5373  *
5374  * Context:
5375  *	Kernel context.
5376  */
5377 static int
5378 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5379 {
5380 	ql_xioctl_t		*xp = ha->xioctl;
5381 	int			rval = 0;
5382 
5383 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5384 
5385 	if (DRIVER_SUSPENDED(ha)) {
5386 		EL(ha, "failed, LOOP_NOT_READY\n");
5387 		cmd->Status = EXT_STATUS_BUSY;
5388 		cmd->ResponseLen = 0;
5389 		return (QL_FUNCTION_SUSPENDED);
5390 	}
5391 
5392 	rval = ql_reset_link_status(ha);
5393 	if (rval != QL_SUCCESS) {
5394 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5395 		cmd->Status = EXT_STATUS_MAILBOX;
5396 		cmd->DetailStatus = rval;
5397 		cmd->ResponseLen = 0;
5398 	}
5399 
5400 	TASK_DAEMON_LOCK(ha);
5401 	xp->IosRequested = 0;
5402 	xp->BytesRequested = 0;
5403 	xp->IOInputRequests = 0;
5404 	xp->IOOutputRequests = 0;
5405 	xp->IOControlRequests = 0;
5406 	xp->IOInputMByteCnt = 0;
5407 	xp->IOOutputMByteCnt = 0;
5408 	xp->IOOutputByteCnt = 0;
5409 	xp->IOInputByteCnt = 0;
5410 	TASK_DAEMON_UNLOCK(ha);
5411 
5412 	INTR_LOCK(ha);
5413 	xp->ControllerErrorCount = 0;
5414 	xp->DeviceErrorCount = 0;
5415 	xp->TotalLipResets = 0;
5416 	xp->TotalInterrupts = 0;
5417 	INTR_UNLOCK(ha);
5418 
5419 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5420 
5421 	return (rval);
5422 }
5423 
5424 /*
5425  * ql_get_statistics
5426  *	Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5427  *
5428  * Input:
5429  *	ha:	adapter state pointer.
5430  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5431  *	mode:	flags.
5432  *
5433  * Returns:
5434  *	None, request status indicated in cmd->Status.
5435  *
5436  * Context:
5437  *	Kernel context.
5438  */
5439 static void
5440 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5441 {
5442 	EXT_HBA_PORT_STAT	ps = {0};
5443 	ql_link_stats_t		*ls;
5444 	int			rval;
5445 	ql_xioctl_t		*xp = ha->xioctl;
5446 	int			retry = 10;
5447 
5448 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5449 
5450 	while (ha->task_daemon_flags &
5451 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5452 		ql_delay(ha, 10000000);	/* 10 second delay */
5453 
5454 		retry--;
5455 
5456 		if (retry == 0) { /* effectively 100 seconds */
5457 			EL(ha, "failed, LOOP_NOT_READY\n");
5458 			cmd->Status = EXT_STATUS_BUSY;
5459 			cmd->ResponseLen = 0;
5460 			return;
5461 		}
5462 	}
5463 
5464 	/* Allocate memory for command. */
5465 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5466 	if (ls == NULL) {
5467 		EL(ha, "failed, kmem_zalloc\n");
5468 		cmd->Status = EXT_STATUS_NO_MEMORY;
5469 		cmd->ResponseLen = 0;
5470 		return;
5471 	}
5472 
5473 	/*
5474 	 * I think these are supposed to be port statistics
5475 	 * the loop ID or port ID should be in cmd->Instance.
5476 	 */
5477 	rval = ql_get_status_counts(ha, (uint16_t)
5478 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5479 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5480 	if (rval != QL_SUCCESS) {
5481 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5482 		    ha->loop_id);
5483 		cmd->Status = EXT_STATUS_MAILBOX;
5484 		cmd->DetailStatus = rval;
5485 		cmd->ResponseLen = 0;
5486 	} else {
5487 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5488 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5489 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5490 		    xp->IOOutputRequests + xp->IOControlRequests);
5491 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5492 		    xp->IOOutputMByteCnt);
5493 		ps.LipResetCount = xp->TotalLipResets;
5494 		ps.InterruptCount = xp->TotalInterrupts;
5495 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5496 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5497 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5498 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5499 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5500 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5501 
5502 		rval = ddi_copyout((void *)&ps,
5503 		    (void *)(uintptr_t)cmd->ResponseAdr,
5504 		    sizeof (EXT_HBA_PORT_STAT), mode);
5505 		if (rval != 0) {
5506 			EL(ha, "failed, ddi_copyout\n");
5507 			cmd->Status = EXT_STATUS_COPY_ERR;
5508 			cmd->ResponseLen = 0;
5509 		} else {
5510 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5511 		}
5512 	}
5513 
5514 	kmem_free(ls, sizeof (ql_link_stats_t));
5515 
5516 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5517 }
5518 
5519 /*
5520  * ql_get_statistics_fc
5521  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5522  *
5523  * Input:
5524  *	ha:	adapter state pointer.
5525  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5526  *	mode:	flags.
5527  *
5528  * Returns:
5529  *	None, request status indicated in cmd->Status.
5530  *
5531  * Context:
5532  *	Kernel context.
5533  */
5534 static void
5535 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5536 {
5537 	EXT_HBA_PORT_STAT	ps = {0};
5538 	ql_link_stats_t		*ls;
5539 	int			rval;
5540 	uint16_t		qlnt;
5541 	EXT_DEST_ADDR		pextdestaddr;
5542 	uint8_t			*name;
5543 	ql_tgt_t		*tq = NULL;
5544 	int			retry = 10;
5545 
5546 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5547 
5548 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5549 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5550 		EL(ha, "failed, ddi_copyin\n");
5551 		cmd->Status = EXT_STATUS_COPY_ERR;
5552 		cmd->ResponseLen = 0;
5553 		return;
5554 	}
5555 
5556 	qlnt = QLNT_PORT;
5557 	name = pextdestaddr.DestAddr.WWPN;
5558 
5559 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5560 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5561 	    name[5], name[6], name[7]);
5562 
5563 	tq = ql_find_port(ha, name, qlnt);
5564 
5565 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5566 		EL(ha, "failed, fc_port not found\n");
5567 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5568 		cmd->ResponseLen = 0;
5569 		return;
5570 	}
5571 
5572 	while (ha->task_daemon_flags &
5573 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE  | DRIVER_STALL)) {
5574 		ql_delay(ha, 10000000);	/* 10 second delay */
5575 
5576 		retry--;
5577 
5578 		if (retry == 0) { /* effectively 100 seconds */
5579 			EL(ha, "failed, LOOP_NOT_READY\n");
5580 			cmd->Status = EXT_STATUS_BUSY;
5581 			cmd->ResponseLen = 0;
5582 			return;
5583 		}
5584 	}
5585 
5586 	/* Allocate memory for command. */
5587 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5588 	if (ls == NULL) {
5589 		EL(ha, "failed, kmem_zalloc\n");
5590 		cmd->Status = EXT_STATUS_NO_MEMORY;
5591 		cmd->ResponseLen = 0;
5592 		return;
5593 	}
5594 
5595 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5596 	    (caddr_t)ls, 0);
5597 	if (rval != QL_SUCCESS) {
5598 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5599 		    tq->d_id.b24);
5600 		cmd->Status = EXT_STATUS_MAILBOX;
5601 		cmd->DetailStatus = rval;
5602 		cmd->ResponseLen = 0;
5603 	} else {
5604 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5605 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5606 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5607 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5608 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5609 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5610 
5611 		rval = ddi_copyout((void *)&ps,
5612 		    (void *)(uintptr_t)cmd->ResponseAdr,
5613 		    sizeof (EXT_HBA_PORT_STAT), mode);
5614 
5615 		if (rval != 0) {
5616 			EL(ha, "failed, ddi_copyout\n");
5617 			cmd->Status = EXT_STATUS_COPY_ERR;
5618 			cmd->ResponseLen = 0;
5619 		} else {
5620 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5621 		}
5622 	}
5623 
5624 	kmem_free(ls, sizeof (ql_link_stats_t));
5625 
5626 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5627 }
5628 
5629 /*
5630  * ql_get_statistics_fc4
5631  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5632  *
5633  * Input:
5634  *	ha:	adapter state pointer.
5635  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5636  *	mode:	flags.
5637  *
5638  * Returns:
5639  *	None, request status indicated in cmd->Status.
5640  *
5641  * Context:
5642  *	Kernel context.
5643  */
5644 static void
5645 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5646 {
5647 	uint32_t		rval;
5648 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5649 	ql_xioctl_t		*xp = ha->xioctl;
5650 
5651 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5652 
5653 	fc4stats.InputRequests = xp->IOInputRequests;
5654 	fc4stats.OutputRequests = xp->IOOutputRequests;
5655 	fc4stats.ControlRequests = xp->IOControlRequests;
5656 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5657 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5658 
5659 	rval = ddi_copyout((void *)&fc4stats,
5660 	    (void *)(uintptr_t)cmd->ResponseAdr,
5661 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5662 
5663 	if (rval != 0) {
5664 		EL(ha, "failed, ddi_copyout\n");
5665 		cmd->Status = EXT_STATUS_COPY_ERR;
5666 		cmd->ResponseLen = 0;
5667 	} else {
5668 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5669 	}
5670 
5671 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5672 }
5673 
5674 /*
5675  * ql_set_led_state
5676  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5677  *
5678  * Input:
5679  *	ha:	adapter state pointer.
5680  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5681  *	mode:	flags.
5682  *
5683  * Returns:
5684  *	None, request status indicated in cmd->Status.
5685  *
5686  * Context:
5687  *	Kernel context.
5688  */
5689 static void
5690 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5691 {
5692 	EXT_BEACON_CONTROL	bstate;
5693 	uint32_t		rval;
5694 	ql_xioctl_t		*xp = ha->xioctl;
5695 
5696 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5697 
5698 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5699 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5700 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5701 		EL(ha, "exiting - failed, RequestLen < EXT_BEACON_CONTROL,"
5702 		    " Len=%xh\n", cmd->RequestLen);
5703 		cmd->ResponseLen = 0;
5704 		return;
5705 	}
5706 
5707 	if (ha->device_id < 0x2300) {
5708 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5709 		cmd->DetailStatus = 0;
5710 		EL(ha, "exiting - failed, Invalid function for HBA model\n");
5711 		cmd->ResponseLen = 0;
5712 		return;
5713 	}
5714 
5715 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5716 	    cmd->RequestLen, mode);
5717 
5718 	if (rval != 0) {
5719 		cmd->Status = EXT_STATUS_COPY_ERR;
5720 		EL(ha, "exiting -  failed, ddi_copyin\n");
5721 		return;
5722 	}
5723 
5724 	switch (bstate.State) {
5725 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5726 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5727 			/* not quite an error -- LED state is already off */
5728 			cmd->Status = EXT_STATUS_OK;
5729 			EL(ha, "LED off request -- LED is already off\n");
5730 			break;
5731 		}
5732 
5733 		xp->ledstate.BeaconState = BEACON_OFF;
5734 		xp->ledstate.LEDflags = LED_ALL_OFF;
5735 
5736 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5737 			cmd->Status = EXT_STATUS_MAILBOX;
5738 		} else {
5739 			cmd->Status = EXT_STATUS_OK;
5740 		}
5741 		break;
5742 
5743 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5744 		if (xp->ledstate.BeaconState == BEACON_ON) {
5745 			/* not quite an error -- LED state is already on */
5746 			cmd->Status = EXT_STATUS_OK;
5747 			EL(ha, "LED on request  - LED is already on\n");
5748 			break;
5749 		}
5750 
5751 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5752 			cmd->Status = EXT_STATUS_MAILBOX;
5753 			break;
5754 		}
5755 
5756 		if (CFG_IST(ha, CFG_CTRL_2425)) {
5757 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5758 		} else {
5759 			xp->ledstate.LEDflags = LED_GREEN;
5760 		}
5761 		xp->ledstate.BeaconState = BEACON_ON;
5762 
5763 		cmd->Status = EXT_STATUS_OK;
5764 		break;
5765 	default:
5766 		cmd->Status = EXT_STATUS_ERR;
5767 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5768 		break;
5769 	}
5770 
5771 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5772 }
5773 
5774 /*
5775  * ql_get_led_state
5776  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5777  *
5778  * Input:
5779  *	ha:	adapter state pointer.
5780  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5781  *	mode:	flags.
5782  *
5783  * Returns:
5784  *	None, request status indicated in cmd->Status.
5785  *
5786  * Context:
5787  *	Kernel context.
5788  */
5789 static void
5790 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5791 {
5792 	EXT_BEACON_CONTROL	bstate = {0};
5793 	uint32_t		rval;
5794 	ql_xioctl_t		*xp = ha->xioctl;
5795 
5796 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5797 
5798 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5799 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5800 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5801 		EL(ha, "exiting - failed, ResponseLen < EXT_BEACON_CONTROL,"
5802 		    "Len=%xh\n", cmd->ResponseLen);
5803 		cmd->ResponseLen = 0;
5804 		return;
5805 	}
5806 
5807 	if (ha->device_id < 0x2300) {
5808 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5809 		cmd->DetailStatus = 0;
5810 		EL(ha, "exiting - failed, Invalid function for HBA model\n");
5811 		cmd->ResponseLen = 0;
5812 		return;
5813 	}
5814 
5815 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5816 		cmd->Status = EXT_STATUS_BUSY;
5817 		EL(ha, "exiting -  failed, isp abort active\n");
5818 		cmd->ResponseLen = 0;
5819 		return;
5820 	}
5821 
5822 	/* inform the user of the current beacon state (off or on) */
5823 	bstate.State = xp->ledstate.BeaconState;
5824 
5825 	rval = ddi_copyout((void *)&bstate,
5826 	    (void *)(uintptr_t)cmd->ResponseAdr,
5827 	    sizeof (EXT_BEACON_CONTROL), mode);
5828 
5829 	if (rval != 0) {
5830 		EL(ha, "failed, ddi_copyout\n");
5831 		cmd->Status = EXT_STATUS_COPY_ERR;
5832 		cmd->ResponseLen = 0;
5833 	} else {
5834 		cmd->Status = EXT_STATUS_OK;
5835 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
5836 	}
5837 
5838 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5839 }
5840 
5841 /*
5842  * ql_blink_led
5843  *	Determine the next state of the LED and drive it
5844  *
5845  * Input:
5846  *	ha:	adapter state pointer.
5847  *
5848  * Context:
5849  *	Interrupt context.
5850  */
5851 void
5852 ql_blink_led(ql_adapter_state_t *ha)
5853 {
5854 	uint32_t		nextstate;
5855 	ql_xioctl_t		*xp = ha->xioctl;
5856 
5857 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5858 
5859 	if (xp->ledstate.BeaconState == BEACON_ON) {
5860 		/* determine the next led state */
5861 		if (CFG_IST(ha, CFG_CTRL_2425)) {
5862 			nextstate = (xp->ledstate.LEDflags) &
5863 			    (~(RD32_IO_REG(ha, gpiod)));
5864 		} else {
5865 			nextstate = (xp->ledstate.LEDflags) &
5866 			    (~(RD16_IO_REG(ha, gpiod)));
5867 		}
5868 
5869 		/* turn the led on or off */
5870 		ql_drive_led(ha, nextstate);
5871 	}
5872 
5873 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5874 }
5875 
5876 /*
5877  * ql_drive_led
5878  *	drive the led's as determined by LEDflags
5879  *
5880  * Input:
5881  *	ha:		adapter state pointer.
5882  *	LEDflags:	LED flags
5883  *
5884  * Context:
5885  *	Kernel/Interrupt context.
5886  */
5887 static void
5888 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
5889 {
5890 
5891 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5892 
5893 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
5894 
5895 		uint16_t	gpio_enable, gpio_data;
5896 
5897 		/* setup to send new data */
5898 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
5899 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
5900 		WRT16_IO_REG(ha, gpioe, gpio_enable);
5901 
5902 		/* read current data and clear out old led data */
5903 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
5904 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
5905 
5906 		/* set in the new led data. */
5907 		gpio_data = (uint16_t)(gpio_data | LEDflags);
5908 
5909 		/* write out the new led data */
5910 		WRT16_IO_REG(ha, gpiod, gpio_data);
5911 
5912 	} else if (CFG_IST(ha, CFG_CTRL_2425)) {
5913 
5914 		uint32_t	gpio_data;
5915 
5916 		/* setup to send new data */
5917 		gpio_data = RD32_IO_REG(ha, gpiod);
5918 		gpio_data |= LED_MASK_UPDATE_24;
5919 		WRT32_IO_REG(ha, gpiod, gpio_data);
5920 
5921 		/* read current data and clear out old led data */
5922 		gpio_data = RD32_IO_REG(ha, gpiod);
5923 		gpio_data &= ~LED_MASK_COLORS_24;
5924 
5925 		/* set in the new led data */
5926 		gpio_data |= LEDflags;
5927 
5928 		/* write out the new led data */
5929 		WRT32_IO_REG(ha, gpiod, gpio_data);
5930 
5931 	} else {
5932 		EL(ha, "unsupported HBA: %xh", ha->device_id);
5933 	}
5934 
5935 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5936 }
5937 
5938 /*
5939  * ql_setup_led
5940  *	Setup LED for driver control
5941  *
5942  * Input:
5943  *	ha:	adapter state pointer.
5944  *
5945  * Context:
5946  *	Kernel/Interrupt context.
5947  */
5948 static uint32_t
5949 ql_setup_led(ql_adapter_state_t *ha)
5950 {
5951 	uint32_t	rval;
5952 	ql_mbx_data_t	mr;
5953 
5954 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5955 
5956 	/* decouple the LED control from the fw */
5957 	rval = ql_get_firmware_option(ha, &mr);
5958 	if (rval != QL_SUCCESS) {
5959 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
5960 		return (rval);
5961 	}
5962 
5963 	/* set the appropriate options */
5964 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
5965 
5966 	/* send it back to the firmware */
5967 	rval = ql_set_firmware_option(ha, &mr);
5968 	if (rval != QL_SUCCESS) {
5969 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
5970 		return (rval);
5971 	}
5972 
5973 	/* initally, turn the LED's off */
5974 	ql_drive_led(ha, LED_ALL_OFF);
5975 
5976 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5977 
5978 	return (rval);
5979 }
5980 
5981 /*
5982  * ql_wrapup_led
5983  *	Return LED control to the firmware
5984  *
5985  * Input:
5986  *	ha:	adapter state pointer.
5987  *
5988  * Context:
5989  *	Kernel/Interrupt context.
5990  */
5991 static uint32_t
5992 ql_wrapup_led(ql_adapter_state_t *ha)
5993 {
5994 	uint32_t	rval;
5995 	ql_mbx_data_t	mr;
5996 
5997 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5998 
5999 	/* Turn all LED's off */
6000 	ql_drive_led(ha, LED_ALL_OFF);
6001 
6002 	if (CFG_IST(ha, CFG_CTRL_2425)) {
6003 
6004 		uint32_t	gpio_data;
6005 
6006 		/* disable the LED update mask */
6007 		gpio_data = RD32_IO_REG(ha, gpiod);
6008 		gpio_data &= ~LED_MASK_UPDATE_24;
6009 
6010 		/* write out the data */
6011 		WRT32_IO_REG(ha, gpiod, gpio_data);
6012 	}
6013 
6014 	/* give LED control back to the f/w */
6015 	rval = ql_get_firmware_option(ha, &mr);
6016 	if (rval != QL_SUCCESS) {
6017 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6018 		return (rval);
6019 	}
6020 
6021 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6022 
6023 	rval = ql_set_firmware_option(ha, &mr);
6024 	if (rval != QL_SUCCESS) {
6025 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6026 		return (rval);
6027 	}
6028 
6029 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6030 
6031 	return (rval);
6032 }
6033 
6034 /*
6035  * ql_get_port_summary
6036  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
6037  *
6038  *	The EXT_IOCTL->RequestAdr points to a single
6039  *	UINT32 which identifies the device type.
6040  *
6041  * Input:
6042  *	ha:	adapter state pointer.
6043  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6044  *	mode:	flags.
6045  *
6046  * Returns:
6047  *	None, request status indicated in cmd->Status.
6048  *
6049  * Context:
6050  *	Kernel context.
6051  */
6052 static void
6053 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6054 {
6055 	EXT_DEVICEDATA		dd = {0};
6056 	EXT_DEVICEDATA		*uddp;
6057 	ql_link_t		*link;
6058 	ql_tgt_t		*tq;
6059 	uint32_t		rlen, dev_type, index;
6060 	int			rval = 0;
6061 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6062 
6063 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
6064 
6065 	ddep = &dd.EntryList[0];
6066 
6067 	/*
6068 	 * Get the type of device the requestor is looking for.
6069 	 *
6070 	 * We ignore this for now.
6071 	 */
6072 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6073 	    (void *)&dev_type, sizeof (dev_type), mode);
6074 	if (rval != 0) {
6075 		cmd->Status = EXT_STATUS_COPY_ERR;
6076 		cmd->ResponseLen = 0;
6077 		EL(ha, "failed, ddi_copyin\n");
6078 		return;
6079 	}
6080 	/*
6081 	 * Count the number of entries to be returned. Count devices
6082 	 * that are offlline, but have been persistently bound.
6083 	 */
6084 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6085 		for (link = ha->dev[index].first; link != NULL;
6086 		    link = link->next) {
6087 			tq = link->base_address;
6088 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6089 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6090 				continue;	/* Skip this one */
6091 			}
6092 			dd.TotalDevices++;
6093 		}
6094 	}
6095 	/*
6096 	 * Compute the number of entries that can be returned
6097 	 * based upon the size of caller's response buffer.
6098 	 */
6099 	dd.ReturnListEntryCount = 0;
6100 	if (dd.TotalDevices == 0) {
6101 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6102 	} else {
6103 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6104 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6105 	}
6106 	if (rlen > cmd->ResponseLen) {
6107 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6108 		cmd->DetailStatus = rlen;
6109 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6110 		    rlen, cmd->ResponseLen);
6111 		cmd->ResponseLen = 0;
6112 		return;
6113 	}
6114 	cmd->ResponseLen = 0;
6115 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6116 	uddep = &uddp->EntryList[0];
6117 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6118 		for (link = ha->dev[index].first; link != NULL;
6119 		    link = link->next) {
6120 			tq = link->base_address;
6121 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6122 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6123 				continue;	/* Skip this one */
6124 			}
6125 
6126 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6127 
6128 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6129 			bcopy(tq->port_name, ddep->PortWWN, 8);
6130 
6131 			ddep->PortID[0] = tq->d_id.b.domain;
6132 			ddep->PortID[1] = tq->d_id.b.area;
6133 			ddep->PortID[2] = tq->d_id.b.al_pa;
6134 
6135 			bcopy(tq->port_name,
6136 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6137 
6138 			ddep->DeviceFlags = tq->flags;
6139 			ddep->LoopID = tq->loop_id;
6140 			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6141 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6142 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6143 			    ha->instance, ddep->TargetAddress.Target,
6144 			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6145 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6146 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6147 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6148 			    ddep->PortWWN[0], ddep->PortWWN[1],
6149 			    ddep->PortWWN[2], ddep->PortWWN[3],
6150 			    ddep->PortWWN[4], ddep->PortWWN[5],
6151 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6152 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6153 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6154 
6155 			if (rval != 0) {
6156 				cmd->Status = EXT_STATUS_COPY_ERR;
6157 				cmd->ResponseLen = 0;
6158 				EL(ha, "failed, ddi_copyout\n");
6159 				break;
6160 			}
6161 			dd.ReturnListEntryCount++;
6162 			uddep++;
6163 			cmd->ResponseLen += (uint32_t)
6164 			    sizeof (EXT_DEVICEDATAENTRY);
6165 		}
6166 	}
6167 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6168 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6169 
6170 	if (rval != 0) {
6171 		cmd->Status = EXT_STATUS_COPY_ERR;
6172 		cmd->ResponseLen = 0;
6173 		EL(ha, "failed, ddi_copyout-2\n");
6174 	} else {
6175 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6176 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6177 	}
6178 }
6179 
6180 /*
6181  * ql_get_target_id
6182  *	Performs EXT_SC_GET_TARGET_ID subcommand. of EXT_CC_GET_DATA.
6183  *
6184  * Input:
6185  *	ha:	adapter state pointer.
6186  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6187  *	mode:	flags.
6188  *
6189  * Returns:
6190  *	None, request status indicated in cmd->Status.
6191  *
6192  * Context:
6193  *	Kernel context.
6194  */
6195 static void
6196 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6197 {
6198 	uint32_t		rval;
6199 	uint16_t		qlnt;
6200 	EXT_DEST_ADDR		extdestaddr = {0};
6201 	uint8_t			*name;
6202 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6203 	ql_tgt_t		*tq;
6204 
6205 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
6206 
6207 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6208 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6209 		EL(ha, "failed, ddi_copyin\n");
6210 		cmd->Status = EXT_STATUS_COPY_ERR;
6211 		cmd->ResponseLen = 0;
6212 		return;
6213 	}
6214 
6215 	qlnt = QLNT_PORT;
6216 	name = wwpn;
6217 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6218 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6219 	    name[5], name[6], name[7]);
6220 
6221 	tq = ql_find_port(ha, name, qlnt);
6222 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6223 		EL(ha, "failed, fc_port not found\n");
6224 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6225 		cmd->ResponseLen = 0;
6226 		return;
6227 	}
6228 
6229 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6230 
6231 	rval = ddi_copyout((void *)&extdestaddr,
6232 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6233 	if (rval != 0) {
6234 		EL(ha, "failed, ddi_copyout\n");
6235 		cmd->Status = EXT_STATUS_COPY_ERR;
6236 		cmd->ResponseLen = 0;
6237 	}
6238 
6239 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6240 }
6241 
6242 /*
6243  * ql_setup_fcache
6244  *	Populates selected flash sections into the cache
6245  *
6246  * Input:
6247  *	ha = adapter state pointer.
6248  *
6249  * Returns:
6250  *
6251  * Context:
6252  *	Kernel context.
6253  *
6254  * Note:
6255  *	Driver must be in stalled state prior to entering or
6256  *	add code to this function prior to calling ql_setup_flash()
6257  */
void
ql_setup_fcache(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	freadpos = 0;
	uint32_t	fw_done = 0;
	ql_fcache_t	*head = NULL;
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Hold the cache lock across the whole build-and-publish cycle. */
	CACHE_LOCK(ha);

	/* If we already have populated it, rtn */
	if (ha->fcache != NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "buffer already populated\n");
		return;
	}

	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "unable to setup flash; rval=%xh\n", rval);
		return;
	}

	/*
	 * Walk the flash section by section.  ql_check_pci() advances
	 * freadpos to the next image and sets it to 0xffffffff after the
	 * last image, terminating the loop.
	 */
	while (freadpos != 0xffffffff) {

		/* Allocate & populate this node */

		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx/25xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				freadpos = FLASH_24XX_FIRMWARE_ADDR;
			}
			fw_done = 1;
		}

		/* Read the first FBUFSIZE bytes of this section from flash. */
		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
		    freadpos)) != QL_SUCCESS) {
			EL(ha, "failed, 24xx dump_fcode"
			    " pos=%xh rval=%xh\n", freadpos, rval);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/* release all resources we have */
		ftmp = head;
		while (ftmp != NULL) {
			tail = ftmp->next;
			kmem_free(ftmp->buf, FBUFSIZE);
			kmem_free(ftmp, sizeof (ql_fcache_t));
			ftmp = tail;
		}

		EL(ha, "failed, exiting\n");
	} else {
		/* Publish the completed list while still holding the lock. */
		ha->fcache = head;
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	CACHE_UNLOCK(ha);
}
6344 
6345 /*
6346  * ql_update_fcache
6347  *	re-populates updated flash into the fcache. If
6348  *	fcache does not exist (e.g., flash was empty/invalid on
6349  *	boot), this routine will create and the populate it.
6350  *
6351  * Input:
6352  *	ha	= adapter state pointer.
 *	bfp	= Pointer to flash buffer.
6354  *	bsize	= Size of flash buffer.
6355  *
6356  * Returns:
6357  *
6358  * Context:
6359  *	Kernel context.
6360  */
6361 static void
6362 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6363 {
6364 	int		rval = QL_SUCCESS;
6365 	uint32_t	freadpos = 0;
6366 	uint32_t	fw_done = 0;
6367 	ql_fcache_t	*head = NULL;
6368 	ql_fcache_t	*tail = NULL;
6369 	ql_fcache_t	*ftmp;
6370 
6371 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6372 
6373 	while (freadpos != 0xffffffff) {
6374 
6375 		/* Allocate & populate this node */
6376 
6377 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6378 			EL(ha, "node alloc failed\n");
6379 			rval = QL_FUNCTION_FAILED;
6380 			break;
6381 		}
6382 
6383 		/* link in the new node */
6384 		if (head == NULL) {
6385 			head = tail = ftmp;
6386 		} else {
6387 			tail->next = ftmp;
6388 			tail = ftmp;
6389 		}
6390 
6391 		/* Do the firmware node first for 24xx's */
6392 		if (fw_done == 0) {
6393 			if (CFG_IST(ha, CFG_CTRL_2425)) {
6394 				freadpos = FLASH_24XX_FIRMWARE_ADDR;
6395 			}
6396 			fw_done = 1;
6397 		}
6398 
6399 		/* read in first FBUFSIZE bytes of this flash section */
6400 		if (freadpos+FBUFSIZE > bsize) {
6401 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6402 			    freadpos, bsize);
6403 			rval = QL_FUNCTION_FAILED;
6404 			break;
6405 		}
6406 		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6407 
6408 		/* checkout the pci data / format */
6409 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6410 			EL(ha, "flash header incorrect\n");
6411 			rval = QL_FUNCTION_FAILED;
6412 			break;
6413 		}
6414 	}
6415 
6416 	if (rval != QL_SUCCESS) {
6417 		/*
6418 		 * release all resources we have
6419 		 */
6420 		ql_fcache_rel(head);
6421 		EL(ha, "failed, exiting\n");
6422 	} else {
6423 		/*
6424 		 * Release previous fcache resources and update with new
6425 		 */
6426 		CACHE_LOCK(ha);
6427 		ql_fcache_rel(ha->fcache);
6428 		ha->fcache = head;
6429 		CACHE_UNLOCK(ha);
6430 
6431 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6432 	}
6433 }
6434 
6435 /*
6436  * ql_setup_fnode
6437  *	Allocates fcache node
6438  *
6439  * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	Pointer to allocated fcache node (NULL = failed)
 *
6445  * Context:
6446  *	Kernel context.
6447  *
6448  * Note:
6449  *	Driver must be in stalled state prior to entering or
6450  *	add code to this function prior to calling ql_setup_flash()
6451  */
6452 static ql_fcache_t *
6453 ql_setup_fnode(ql_adapter_state_t *ha)
6454 {
6455 	ql_fcache_t	*fnode = NULL;
6456 
6457 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6458 	    KM_SLEEP))) == NULL) {
6459 		EL(ha, "fnode alloc failed\n");
6460 		fnode = NULL;
6461 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6462 	    KM_SLEEP))) == NULL) {
6463 		EL(ha, "buf alloc failed\n");
6464 		kmem_free(fnode, sizeof (ql_fcache_t));
6465 		fnode = NULL;
6466 	} else {
6467 		fnode->buflen = FBUFSIZE;
6468 	}
6469 
6470 	return (fnode);
6471 }
6472 
6473 /*
6474  * ql_fcache_rel
6475  *	Releases the fcache resources
6476  *
6477  * Input:
6478  *	ha	= adapter state pointer.
6479  *	head	= Pointer to fcache linked list
6480  *
6481  * Returns:
6482  *
6483  * Context:
6484  *	Kernel context.
6485  *
6486  */
6487 void
6488 ql_fcache_rel(ql_fcache_t *head)
6489 {
6490 	ql_fcache_t	*ftmp = head;
6491 	ql_fcache_t	*tail;
6492 
6493 	/* release all resources we have */
6494 	while (ftmp != NULL) {
6495 		tail = ftmp->next;
6496 		kmem_free(ftmp->buf, FBUFSIZE);
6497 		kmem_free(ftmp, sizeof (ql_fcache_t));
6498 		ftmp = tail;
6499 	}
6500 }
6501 
6502 /*
6503  * ql_get_fbuf
6504  *	Search the fcache list for the type specified
6505  *
6506  * Input:
6507  *	fptr	= Pointer to fcache linked list
6508  *	ftype	= Type of image to be returned.
6509  *
6510  * Returns:
6511  *	Pointer to ql_fcache_t.
6512  *	NULL means not found.
6513  *
6514  * Context:
6515  *	Kernel context.
6516  *
6517  *
6518  */
6519 ql_fcache_t *
6520 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6521 {
6522 	while (fptr != NULL) {
6523 		/* does this image meet criteria? */
6524 		if (ftype & fptr->type) {
6525 			break;
6526 		}
6527 		fptr = fptr->next;
6528 	}
6529 	return (fptr);
6530 }
6531 
6532 /*
6533  * ql_check_pci
6534  *
6535  *	checks the passed buffer for a valid pci signature and
6536  *	expected (and in range) pci length values.
6537  *
6538  *	For firmware type, a pci header is added since the image in
6539  *	the flash does not have one (!!!).
6540  *
6541  *	On successful pci check, nextpos adjusted to next pci header.
6542  *
6543  * Returns:
6544  *	-1 --> last pci image
6545  *	0 --> pci header valid
6546  *	1 --> pci header invalid.
6547  *
6548  * Context:
6549  *	Kernel context.
6550  */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;
	uint8_t		*pciinfo;

	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	/* SBUS cards carry a single fcode image; no pci chain to walk. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/* Version string comes from the "version" device property. */
		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		/* Single image: terminate the caller's walk. */
		*nextpos = 0xffffffff;
		return (0);
	}

	/* Raw firmware section: it has no pci header, so synthesize one. */
	if (*nextpos == FLASH_24XX_FIRMWARE_ADDR) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		fwh.signature[1] = PCI_HEADER1;
		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));

		/* "PCIR" data-structure signature per the pci ROM format. */
		fwd.signature[0] = 'P';
		fwd.signature[1] = 'C';
		fwd.signature[2] = 'I';
		fwd.signature[3] = 'R';
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/*
		 * Prepend header + data record, then as much of the raw
		 * firmware image as still fits in the FBUFSIZE buffer.
		 */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		/*
		 * NOTE(review): version bytes are taken from fixed offsets
		 * 19/23/27 of the now header-prepended image -- confirm
		 * against the firmware image layout.
		 */
		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		/* Restart the walk at the normal pci image chain. */
		*nextpos = 0;
		kmem_free(buf, FBUFSIZE);
		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	/* dataoffset is stored little-endian in the header. */
	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		/* Last image in the chain: terminate the caller's walk. */
		EL(ha, "last image\n");
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	/* Map the pci code type to the driver's image type. */
	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	/* Version string is <major>.<minor> from the data-section revision. */
	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	return (0);
}
6699 
6700 /*
6701  * ql_get_sfp
6702  *	Returns sfp data to sdmapi caller
6703  *
6704  * Input:
6705  *	ha:	adapter state pointer.
6706  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6707  *	mode:	flags.
6708  *
6709  * Returns:
6710  *	None, request status indicated in cmd->Status.
6711  *
6712  * Context:
6713  *	Kernel context.
6714  */
6715 static void
6716 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6717 {
6718 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
6719 
6720 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
6721 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
6722 		EL(ha, "failed, invalid request for HBA\n");
6723 		return;
6724 	}
6725 
6726 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
6727 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6728 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
6729 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
6730 		    cmd->ResponseLen);
6731 		return;
6732 	}
6733 
6734 	/* Dump SFP data in user buffer */
6735 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
6736 	    mode)) != 0) {
6737 		cmd->Status = EXT_STATUS_COPY_ERR;
6738 		EL(ha, "failed, copy error\n");
6739 	} else {
6740 		cmd->Status = EXT_STATUS_OK;
6741 	}
6742 
6743 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6744 }
6745 
6746 /*
6747  * ql_dump_sfp
6748  *	Dumps SFP.
6749  *
6750  * Input:
6751  *	ha:	adapter state pointer.
6752  *	bp:	buffer address.
6753  *	mode:	flags
6754  *
6755  * Returns:
6756  *
6757  * Context:
6758  *	Kernel context.
6759  */
static int
ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
{
	dma_mem_t	mem;
	uint32_t	cnt;
	int		rval2, rval = 0;
	uint32_t	dxfer;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Get memory for SFP. */

	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
		return (ENOMEM);
	}

	/*
	 * Read the SFP mem.size bytes at a time: the first 256 bytes use
	 * device address 0xA0, the remainder 0xA2, with the low byte of
	 * cnt as the offset within the selected address.  Assumes
	 * mem.size (64) divides 256 evenly -- TODO confirm if the DMA
	 * allocation size ever changes.
	 */
	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
		rval2 = ql_read_sfp(ha, &mem,
		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
		    (uint16_t)(cnt & 0xff));
		if (rval2 != QL_SUCCESS) {
			EL(ha, "failed, read_sfp=%xh\n", rval2);
			rval = EFAULT;
			break;
		}

		/* copy the data back */
		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
		    mode)) != mem.size) {
			/* ddi copy error */
			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
			rval = EFAULT;
			break;
		}

		/* adjust the buffer pointer */
		bp = (caddr_t)bp + mem.size;
	}

	/* Release the DMA staging buffer regardless of outcome. */
	ql_free_phys(ha, &mem);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
6807 
6808 /*
6809  * ql_port_param
6810  *	Retrieves or sets the firmware port speed settings
6811  *
6812  * Input:
6813  *	ha:	adapter state pointer.
6814  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6815  *	mode:	flags.
6816  *
6817  * Returns:
6818  *	None, request status indicated in cmd->Status.
6819  *
6820  * Context:
6821  *	Kernel context.
6822  *
6823  */
static void
ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t			*name;
	ql_tgt_t		*tq;
	EXT_PORT_PARAM		port_param = {0};
	uint32_t		rval = QL_SUCCESS;
	uint32_t		idma_rate;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* iIDMA rate control is only supported on 24xx/25xx adapters. */
	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
		EL(ha, "invalid request for this HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* Firmware port-rate mailbox commands require a ready loop. */
	if (LOOP_NOT_READY(ha)) {
		EL(ha, "failed, loop not ready\n");
		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Pull the caller's get/set request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Only lookup by port WWN is supported. */
	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
		EL(ha, "Unsupported dest lookup type: %xh\n",
		    port_param.FCScsiAddr.DestType);
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	name = port_param.FCScsiAddr.DestAddr.WWPN;

	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    ha->instance, name[0], name[1], name[2], name[3], name[4],
	    name[5], name[6], name[7]);

	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = EXT_STATUS_OK;

	switch (port_param.Mode) {
	case EXT_IIDMA_MODE_GET:
		/*
		 * Report the firmware's port rate for the wwpn
		 */
		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
		    port_param.Mode);

		if (rval != QL_SUCCESS) {
			EL(ha, "iidma get failed: %xh\n", rval);
			cmd->Status = EXT_STATUS_MAILBOX;
			cmd->DetailStatus = rval;
			cmd->ResponseLen = 0;
		} else {
			/* Translate firmware rate code to ioctl speed code. */
			switch (idma_rate) {
			case IIDMA_RATE_1GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_1GBIT;
				break;
			case IIDMA_RATE_2GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_2GBIT;
				break;
			case IIDMA_RATE_4GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_4GBIT;
				break;
			case IIDMA_RATE_8GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_8GBIT;
				break;
			default:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_UNKNOWN;
				EL(ha, "failed, Port speed rate=%xh\n",
				    idma_rate);
				break;
			}

			/* Copy back the data */
			rval = ddi_copyout((void *)&port_param,
			    (void *)(uintptr_t)cmd->ResponseAdr,
			    sizeof (EXT_PORT_PARAM), mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
			} else {
				cmd->ResponseLen = (uint32_t)
				    sizeof (EXT_PORT_PARAM);
			}
		}
		break;

	case EXT_IIDMA_MODE_SET:
		/*
		 * Set the firmware's port rate for the wwpn
		 */
		/* Translate ioctl speed code to firmware rate code. */
		switch (port_param.Speed) {
		case EXT_DEF_PORTSPEED_1GBIT:
			idma_rate = IIDMA_RATE_1GB;
			break;
		case EXT_DEF_PORTSPEED_2GBIT:
			idma_rate = IIDMA_RATE_2GB;
			break;
		case EXT_DEF_PORTSPEED_4GBIT:
			idma_rate = IIDMA_RATE_4GB;
			break;
		case EXT_DEF_PORTSPEED_8GBIT:
			idma_rate = IIDMA_RATE_8GB;
			break;
		default:
			EL(ha, "invalid set iidma rate: %x\n",
			    port_param.Speed);
			cmd->Status = EXT_STATUS_INVALID_PARAM;
			cmd->ResponseLen = 0;
			rval = QL_PARAMETER_ERROR;
			break;
		}

		/* Only issue the mailbox command if the rate was valid. */
		if (rval == QL_SUCCESS) {
			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
			    port_param.Mode);
			if (rval != QL_SUCCESS) {
				EL(ha, "iidma set failed: %xh\n", rval);
				cmd->Status = EXT_STATUS_MAILBOX;
				cmd->DetailStatus = rval;
				cmd->ResponseLen = 0;
			}
		}
		break;
	default:
		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		cmd->DetailStatus = 0;
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
6984 
6985 /*
6986  * ql_get_fwexttrace
6987  *	Dumps f/w extended trace buffer
6988  *
6989  * Input:
6990  *	ha:	adapter state pointer.
6991  *	bp:	buffer address.
6992  *	mode:	flags
6993  *
6994  * Returns:
6995  *
6996  * Context:
6997  *	Kernel context.
6998  */
6999 /* ARGSUSED */
7000 static void
7001 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7002 {
7003 	int	rval;
7004 	caddr_t	payload;
7005 
7006 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7007 
7008 	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
7009 		EL(ha, "invalid request for this HBA\n");
7010 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7011 		cmd->ResponseLen = 0;
7012 		return;
7013 	}
7014 
7015 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7016 	    (ha->fwexttracebuf.bp == NULL)) {
7017 		EL(ha, "f/w extended trace is not enabled\n");
7018 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7019 		cmd->ResponseLen = 0;
7020 		return;
7021 	}
7022 
7023 	if (cmd->ResponseLen < FWEXTSIZE) {
7024 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7025 		cmd->DetailStatus = FWEXTSIZE;
7026 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7027 		    cmd->ResponseLen, FWEXTSIZE);
7028 		cmd->ResponseLen = 0;
7029 		return;
7030 	}
7031 
7032 	/* Time Stamp */
7033 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7034 	if (rval != QL_SUCCESS) {
7035 		EL(ha, "f/w extended trace insert"
7036 		    "time stamp failed: %xh\n", rval);
7037 		cmd->Status = EXT_STATUS_ERR;
7038 		cmd->ResponseLen = 0;
7039 		return;
7040 	}
7041 
7042 	/* Disable Tracing */
7043 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7044 	if (rval != QL_SUCCESS) {
7045 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7046 		cmd->Status = EXT_STATUS_ERR;
7047 		cmd->ResponseLen = 0;
7048 		return;
7049 	}
7050 
7051 	/* Allocate payload buffer */
7052 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7053 	if (payload == NULL) {
7054 		EL(ha, "failed, kmem_zalloc\n");
7055 		cmd->Status = EXT_STATUS_NO_MEMORY;
7056 		cmd->ResponseLen = 0;
7057 		return;
7058 	}
7059 
7060 	/* Sync DMA buffer. */
7061 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7062 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7063 
7064 	/* Copy trace buffer data. */
7065 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7066 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7067 	    DDI_DEV_AUTOINCR);
7068 
7069 	/* Send payload to application. */
7070 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7071 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7072 		EL(ha, "failed, send_buffer_data\n");
7073 		cmd->Status = EXT_STATUS_COPY_ERR;
7074 		cmd->ResponseLen = 0;
7075 	} else {
7076 		cmd->Status = EXT_STATUS_OK;
7077 	}
7078 
7079 	kmem_free(payload, FWEXTSIZE);
7080 }
7081 
7082 /*
7083  * ql_get_fwfcetrace
7084  *	Dumps f/w fibre channel event trace buffer
7085  *
7086  * Input:
7087  *	ha:	adapter state pointer.
7088  *	bp:	buffer address.
7089  *	mode:	flags
7090  *
7091  * Returns:
7092  *
7093  * Context:
7094  *	Kernel context.
7095  */
7096 /* ARGSUSED */
7097 static void
7098 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7099 {
7100 	int	rval;
7101 	caddr_t	payload;
7102 
7103 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7104 
7105 	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
7106 		EL(ha, "invalid request for this HBA\n");
7107 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7108 		cmd->ResponseLen = 0;
7109 		return;
7110 	}
7111 
7112 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7113 	    (ha->fwfcetracebuf.bp == NULL)) {
7114 		EL(ha, "f/w FCE trace is not enabled\n");
7115 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7116 		cmd->ResponseLen = 0;
7117 		return;
7118 	}
7119 
7120 	if (cmd->ResponseLen < FWFCESIZE) {
7121 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7122 		cmd->DetailStatus = FWFCESIZE;
7123 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7124 		    cmd->ResponseLen, FWFCESIZE);
7125 		cmd->ResponseLen = 0;
7126 		return;
7127 	}
7128 
7129 	/* Disable Tracing */
7130 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7131 	if (rval != QL_SUCCESS) {
7132 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7133 		cmd->Status = EXT_STATUS_ERR;
7134 		cmd->ResponseLen = 0;
7135 		return;
7136 	}
7137 
7138 	/* Allocate payload buffer */
7139 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7140 	if (payload == NULL) {
7141 		EL(ha, "failed, kmem_zalloc\n");
7142 		cmd->Status = EXT_STATUS_NO_MEMORY;
7143 		cmd->ResponseLen = 0;
7144 		return;
7145 	}
7146 
7147 	/* Sync DMA buffer. */
7148 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7149 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7150 
7151 	/* Copy trace buffer data. */
7152 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7153 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7154 	    DDI_DEV_AUTOINCR);
7155 
7156 	/* Send payload to application. */
7157 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7158 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7159 		EL(ha, "failed, send_buffer_data\n");
7160 		cmd->Status = EXT_STATUS_COPY_ERR;
7161 		cmd->ResponseLen = 0;
7162 	} else {
7163 		cmd->Status = EXT_STATUS_OK;
7164 	}
7165 
7166 	kmem_free(payload, FWFCESIZE);
7167 }
7168 
7169 /*
7170  * ql_get_pci_data
7171  *	Retrieves pci config space data
7172  *
7173  * Input:
7174  *	ha:	adapter state pointer.
7175  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7176  *	mode:	flags.
7177  *
7178  * Returns:
7179  *	None, request status indicated in cmd->Status.
7180  *
7181  * Context:
7182  *	Kernel context.
7183  *
7184  */
7185 static void
7186 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7187 {
7188 	uint8_t		cap_ptr;
7189 	uint8_t		cap_id;
7190 	uint32_t	buf_size = 256;
7191 
7192 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7193 
7194 	/*
7195 	 * First check the "Capabilities List" bit of the status register.
7196 	 */
7197 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7198 		/*
7199 		 * Now get the capability pointer
7200 		 */
7201 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7202 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7203 			/*
7204 			 * Check for the pcie capability.
7205 			 */
7206 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7207 			if (cap_id == PCI_CAP_ID_PCI_E) {
7208 				buf_size = 4096;
7209 				break;
7210 			}
7211 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7212 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7213 		}
7214 	}
7215 
7216 	if (cmd->ResponseLen < buf_size) {
7217 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7218 		cmd->DetailStatus = buf_size;
7219 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7220 		    cmd->ResponseLen);
7221 		return;
7222 	}
7223 
7224 	/* Dump PCI config data. */
7225 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7226 	    buf_size, mode)) != 0) {
7227 		cmd->Status = EXT_STATUS_COPY_ERR;
7228 		cmd->DetailStatus = 0;
7229 		EL(ha, "failed, copy err pci_dump\n");
7230 	} else {
7231 		cmd->Status = EXT_STATUS_OK;
7232 		cmd->DetailStatus = buf_size;
7233 	}
7234 
7235 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7236 }
7237 
7238 /*
7239  * ql_pci_dump
7240  *	Dumps PCI config data to application buffer.
7241  *
7242  * Input:
7243  *	ha = adapter state pointer.
7244  *	bp = user buffer address.
7245  *
7246  * Returns:
7247  *
7248  * Context:
7249  *	Kernel context.
7250  */
7251 int
7252 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7253 {
7254 	uint32_t	pci_os;
7255 	uint32_t	*ptr32, *org_ptr32;
7256 
7257 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7258 
7259 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7260 	if (ptr32 == NULL) {
7261 		EL(ha, "failed kmem_zalloc\n");
7262 		return (ENOMEM);
7263 	}
7264 
7265 	/* store the initial value of ptr32 */
7266 	org_ptr32 = ptr32;
7267 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7268 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7269 		LITTLE_ENDIAN_32(ptr32);
7270 		ptr32++;
7271 	}
7272 
7273 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7274 	    0) {
7275 		EL(ha, "failed ddi_copyout\n");
7276 		kmem_free(org_ptr32, pci_size);
7277 		return (EFAULT);
7278 	}
7279 
7280 	QL_DUMP_9(org_ptr32, 8, pci_size);
7281 
7282 	kmem_free(org_ptr32, pci_size);
7283 
7284 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7285 
7286 	return (0);
7287 }
7288 
7289 /*
7290  * ql_menlo_reset
7291  *	Reset Menlo
7292  *
7293  * Input:
7294  *	ha:	adapter state pointer.
7295  *	bp:	buffer address.
7296  *	mode:	flags
7297  *
7298  * Returns:
7299  *
7300  * Context:
7301  *	Kernel context.
7302  */
static void
ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	MENLO_RESET	rst;
	ql_mbx_data_t	mr;
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Only valid on Menlo-equipped adapters. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */


	/*  Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (MENLO_RESET)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (MENLO_RESET));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get reset request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&rst, sizeof (MENLO_RESET), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Wait for I/O to stop and daemon to stall. */
	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Issue the reset; mailbox register 1 carries a Menlo-specific
	 * substatus even when the mailbox command itself succeeds.
	 */
	rval = ql_reset_menlo(ha, &mr, rst.Flags);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, status=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else if (mr.mb[1] != 0) {
		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = mr.mb[1];
		cmd->ResponseLen = 0;
	}

	/* Always resume the HBA after a successful suspend. */
	ql_restart_hba(ha);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
7370 
7371 /*
7372  * ql_menlo_get_fw_version
7373  *	Get Menlo firmware version.
7374  *
7375  * Input:
7376  *	ha:	adapter state pointer.
7377  *	bp:	buffer address.
7378  *	mode:	flags
7379  *
7380  * Returns:
7381  *
7382  * Context:
7383  *	Kernel context.
7384  */
static void
ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int			rval;
	ql_mbx_iocb_t		*pkt;
	MENLO_GET_FW_VERSION	ver = {0};

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* This request is only valid on Menlo-capable adapters. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* Caller's response buffer must hold the whole version struct. */
	if (cmd->ResponseLen < sizeof (MENLO_GET_FW_VERSION)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (MENLO_GET_FW_VERSION);
		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
		    sizeof (MENLO_GET_FW_VERSION));
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate packet. */
	/*
	 * NOTE(review): KM_SLEEP allocations block until satisfied and do
	 * not return NULL on Solaris; this check is kept for safety.
	 */
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Build a VERIFY_MENLO IOCB that only queries the firmware
	 * version (VMF_DO_NOT_UPDATE_FW suppresses any flash update).
	 */
	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
	pkt->mvfy.entry_count = 1;
	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Convert returned fields from wire (little-endian) order in place. */
	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
	/* Captured unconditionally; only copied out on success below. */
	ver.FwVersion = LE_32(pkt->mvfy.fw_version);

	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
	    pkt->mvfy.options_status != CS_COMPLETE) {
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&ver,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (MENLO_GET_FW_VERSION), mode) != 0) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = sizeof (MENLO_GET_FW_VERSION);
	}

	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
7452 
7453 /*
7454  * ql_menlo_update_fw
7455  *	Get Menlo update firmware.
7456  *
7457  * Input:
7458  *	ha:	adapter state pointer.
7459  *	bp:	buffer address.
7460  *	mode:	flags
7461  *
7462  * Returns:
7463  *
7464  * Context:
7465  *	Kernel context.
7466  */
7467 static void
7468 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7469 {
7470 	ql_mbx_iocb_t		*pkt;
7471 	dma_mem_t		*dma_mem;
7472 	MENLO_UPDATE_FW		fw;
7473 	uint32_t		*ptr32;
7474 	int			rval;
7475 
7476 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7477 
7478 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7479 		EL(ha, "failed, invalid request for HBA\n");
7480 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7481 		cmd->ResponseLen = 0;
7482 		return;
7483 	}
7484 
7485 	/*
7486 	 * TODO: only vp_index 0 can do this (?)
7487 	 */
7488 
7489 	/*  Verify the size of request structure. */
7490 	if (cmd->RequestLen < sizeof (MENLO_UPDATE_FW)) {
7491 		/* Return error */
7492 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7493 		    sizeof (MENLO_UPDATE_FW));
7494 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7495 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7496 		cmd->ResponseLen = 0;
7497 		return;
7498 	}
7499 
7500 	/* Get update fw request. */
7501 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
7502 	    sizeof (MENLO_UPDATE_FW), mode) != 0) {
7503 		EL(ha, "failed, ddi_copyin\n");
7504 		cmd->Status = EXT_STATUS_COPY_ERR;
7505 		cmd->ResponseLen = 0;
7506 		return;
7507 	}
7508 
7509 	/* Wait for I/O to stop and daemon to stall. */
7510 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7511 		EL(ha, "ql_stall_driver failed\n");
7512 		ql_restart_hba(ha);
7513 		cmd->Status = EXT_STATUS_BUSY;
7514 		cmd->ResponseLen = 0;
7515 		return;
7516 	}
7517 
7518 	/* Allocate packet. */
7519 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
7520 	if (dma_mem == NULL) {
7521 		EL(ha, "failed, kmem_zalloc\n");
7522 		cmd->Status = EXT_STATUS_NO_MEMORY;
7523 		cmd->ResponseLen = 0;
7524 		return;
7525 	}
7526 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7527 	if (pkt == NULL) {
7528 		EL(ha, "failed, kmem_zalloc\n");
7529 		kmem_free(dma_mem, sizeof (dma_mem_t));
7530 		ql_restart_hba(ha);
7531 		cmd->Status = EXT_STATUS_NO_MEMORY;
7532 		cmd->ResponseLen = 0;
7533 		return;
7534 	}
7535 
7536 	/* Get DMA memory for the IOCB */
7537 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
7538 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
7539 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7540 		    "alloc failed", QL_NAME, ha->instance);
7541 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7542 		kmem_free(dma_mem, sizeof (dma_mem_t));
7543 		ql_restart_hba(ha);
7544 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7545 		cmd->ResponseLen = 0;
7546 		return;
7547 	}
7548 
7549 	/* Get firmware data. */
7550 	if (ql_get_buffer_data((caddr_t)fw.pFwDataBytes, dma_mem->bp,
7551 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
7552 		EL(ha, "failed, get_buffer_data\n");
7553 		ql_free_dma_resource(ha, dma_mem);
7554 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7555 		kmem_free(dma_mem, sizeof (dma_mem_t));
7556 		ql_restart_hba(ha);
7557 		cmd->Status = EXT_STATUS_COPY_ERR;
7558 		cmd->ResponseLen = 0;
7559 		return;
7560 	}
7561 
7562 	/* Sync DMA buffer. */
7563 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7564 	    DDI_DMA_SYNC_FORDEV);
7565 
7566 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7567 	pkt->mvfy.entry_count = 1;
7568 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
7569 	ptr32 = dma_mem->bp;
7570 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
7571 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
7572 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
7573 	pkt->mvfy.dseg_count = LE_16(1);
7574 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
7575 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7576 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
7577 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7578 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
7579 
7580 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7581 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7582 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7583 
7584 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7585 	    pkt->mvfy.options_status != CS_COMPLETE) {
7586 		/* Command error */
7587 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7588 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7589 		    pkt->mvfy.failure_code);
7590 		cmd->Status = EXT_STATUS_ERR;
7591 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7592 		    QL_FUNCTION_FAILED;
7593 		cmd->ResponseLen = 0;
7594 	}
7595 
7596 	ql_free_dma_resource(ha, dma_mem);
7597 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7598 	kmem_free(dma_mem, sizeof (dma_mem_t));
7599 	ql_restart_hba(ha);
7600 
7601 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7602 }
7603 
7604 /*
7605  * ql_menlo_manage_info
7606  *	Get Menlo manage info.
7607  *
7608  * Input:
7609  *	ha:	adapter state pointer.
7610  *	bp:	buffer address.
7611  *	mode:	flags
7612  *
7613  * Returns:
7614  *
7615  * Context:
7616  *	Kernel context.
7617  */
7618 static void
7619 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7620 {
7621 	ql_mbx_iocb_t		*pkt;
7622 	dma_mem_t		*dma_mem = NULL;
7623 	MENLO_MANAGE_INFO	info;
7624 	int			rval;
7625 
7626 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7627 
7628 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7629 		EL(ha, "failed, invalid request for HBA\n");
7630 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7631 		cmd->ResponseLen = 0;
7632 		return;
7633 	}
7634 
7635 	/*  Verify the size of request structure. */
7636 	if (cmd->RequestLen < sizeof (MENLO_MANAGE_INFO)) {
7637 		/* Return error */
7638 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7639 		    sizeof (MENLO_MANAGE_INFO));
7640 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7641 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7642 		cmd->ResponseLen = 0;
7643 		return;
7644 	}
7645 
7646 	/* Get manage info request. */
7647 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
7648 	    (caddr_t)&info, sizeof (MENLO_MANAGE_INFO), mode) != 0) {
7649 		EL(ha, "failed, ddi_copyin\n");
7650 		cmd->Status = EXT_STATUS_COPY_ERR;
7651 		cmd->ResponseLen = 0;
7652 		return;
7653 	}
7654 
7655 	/* Allocate packet. */
7656 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7657 	if (pkt == NULL) {
7658 		EL(ha, "failed, kmem_zalloc\n");
7659 		ql_restart_driver(ha);
7660 		cmd->Status = EXT_STATUS_NO_MEMORY;
7661 		cmd->ResponseLen = 0;
7662 		return;
7663 	}
7664 
7665 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
7666 	pkt->mdata.entry_count = 1;
7667 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
7668 
7669 	/* Get DMA memory for the IOCB */
7670 	if (info.Operation == MENLO_OP_READ_MEM ||
7671 	    info.Operation == MENLO_OP_WRITE_MEM) {
7672 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
7673 		pkt->mdata.parameter_1 =
7674 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
7675 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
7676 		    KM_SLEEP);
7677 		if (dma_mem == NULL) {
7678 			EL(ha, "failed, kmem_zalloc\n");
7679 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7680 			cmd->Status = EXT_STATUS_NO_MEMORY;
7681 			cmd->ResponseLen = 0;
7682 			return;
7683 		}
7684 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
7685 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
7686 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7687 			    "alloc failed", QL_NAME, ha->instance);
7688 			kmem_free(dma_mem, sizeof (dma_mem_t));
7689 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7690 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7691 			cmd->ResponseLen = 0;
7692 			return;
7693 		}
7694 		if (info.Operation == MENLO_OP_WRITE_MEM) {
7695 			/* Get data. */
7696 			if (ql_get_buffer_data((caddr_t)info.pDataBytes,
7697 			    dma_mem->bp, info.TotalByteCount, mode) !=
7698 			    info.TotalByteCount) {
7699 				EL(ha, "failed, get_buffer_data\n");
7700 				ql_free_dma_resource(ha, dma_mem);
7701 				kmem_free(dma_mem, sizeof (dma_mem_t));
7702 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7703 				cmd->Status = EXT_STATUS_COPY_ERR;
7704 				cmd->ResponseLen = 0;
7705 				return;
7706 			}
7707 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
7708 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
7709 		}
7710 		pkt->mdata.dseg_count = LE_16(1);
7711 		pkt->mdata.dseg_0_address[0] = (uint32_t)
7712 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7713 		pkt->mdata.dseg_0_address[1] = (uint32_t)
7714 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7715 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
7716 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
7717 		pkt->mdata.parameter_1 =
7718 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
7719 		pkt->mdata.parameter_2 =
7720 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
7721 		pkt->mdata.parameter_3 =
7722 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
7723 	} else if (info.Operation & MENLO_OP_GET_INFO) {
7724 		pkt->mdata.parameter_1 =
7725 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
7726 		pkt->mdata.parameter_2 =
7727 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
7728 	}
7729 
7730 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7731 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
7732 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
7733 
7734 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
7735 	    pkt->mdata.options_status != CS_COMPLETE) {
7736 		/* Command error */
7737 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7738 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
7739 		    pkt->mdata.failure_code);
7740 		cmd->Status = EXT_STATUS_ERR;
7741 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7742 		    QL_FUNCTION_FAILED;
7743 		cmd->ResponseLen = 0;
7744 	} else if (info.Operation == MENLO_OP_READ_MEM) {
7745 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7746 		    DDI_DMA_SYNC_FORKERNEL);
7747 		if (ql_send_buffer_data((caddr_t)info.pDataBytes, dma_mem->bp,
7748 		    info.TotalByteCount, mode) != info.TotalByteCount) {
7749 			cmd->Status = EXT_STATUS_COPY_ERR;
7750 			cmd->ResponseLen = 0;
7751 		}
7752 	}
7753 
7754 	ql_free_dma_resource(ha, dma_mem);
7755 	kmem_free(dma_mem, sizeof (dma_mem_t));
7756 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7757 
7758 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7759 }
7760 
7761 /*
7762  * ql_suspend_hba
7763  *	Suspends all adapter ports.
7764  *
7765  * Input:
7766  *	ha:		adapter state pointer.
7767  *	options:	BIT_0 --> leave driver stalled on exit if
7768  *				  failed.
7769  *
7770  * Returns:
7771  *	qla local function return status code.
7772  *
7773  * Context:
7774  *	Kernel context.
7775  */
7776 static int
7777 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
7778 {
7779 	ql_adapter_state_t	*ha2;
7780 	ql_link_t		*link;
7781 	int			rval = QL_SUCCESS;
7782 
7783 	/* Quiesce I/O on all adapter ports */
7784 	for (link = ql_hba.first; link != NULL; link = link->next) {
7785 		ha2 = link->base_address;
7786 
7787 		if (ha2->fru_hba_index != ha->fru_hba_index) {
7788 			continue;
7789 		}
7790 
7791 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
7792 			EL(ha, "ql_stall_driver status=%xh\n", rval);
7793 			break;
7794 		}
7795 	}
7796 
7797 	return (rval);
7798 }
7799 
7800 /*
7801  * ql_restart_hba
7802  *	Restarts adapter.
7803  *
7804  * Input:
7805  *	ha:	adapter state pointer.
7806  *
7807  * Context:
7808  *	Kernel context.
7809  */
7810 static void
7811 ql_restart_hba(ql_adapter_state_t *ha)
7812 {
7813 	ql_adapter_state_t	*ha2;
7814 	ql_link_t		*link;
7815 
7816 	/* Resume I/O on all adapter ports */
7817 	for (link = ql_hba.first; link != NULL; link = link->next) {
7818 		ha2 = link->base_address;
7819 
7820 		if (ha2->fru_hba_index != ha->fru_hba_index) {
7821 			continue;
7822 		}
7823 
7824 		ql_restart_driver(ha2);
7825 	}
7826 }
7827 
7828 /*
7829  * ql_get_vp_cnt_id
7830  *	Retrieves pci config space data
7831  *
7832  * Input:
7833  *	ha:	adapter state pointer.
7834  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7835  *	mode:	flags.
7836  *
7837  * Returns:
7838  *	None, request status indicated in cmd->Status.
7839  *
7840  * Context:
7841  *	Kernel context.
7842  *
7843  */
7844 static void
7845 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7846 {
7847 	ql_adapter_state_t	*vha;
7848 	EXT_VPORT_ID_CNT	tmp_vp = {0};
7849 	int			id = 0;
7850 	int			rval;
7851 
7852 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7853 
7854 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT)) {
7855 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7856 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
7857 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
7858 		    cmd->ResponseLen);
7859 		cmd->ResponseLen = 0;
7860 		return;
7861 	}
7862 
7863 	vha = ha->vp_next;
7864 	while (vha != NULL) {
7865 		tmp_vp.VpCnt++;
7866 		tmp_vp.VpId[id] = vha->vp_index;
7867 		id++;
7868 		vha = vha->vp_next;
7869 	}
7870 	rval = ddi_copyout((void *)&tmp_vp,
7871 	    (void *)(uintptr_t)(cmd->ResponseAdr),
7872 	    sizeof (EXT_VPORT_ID_CNT), mode);
7873 	if (rval != 0) {
7874 		cmd->Status = EXT_STATUS_COPY_ERR;
7875 		cmd->ResponseLen = 0;
7876 		EL(ha, "failed, ddi_copyout\n");
7877 	} else {
7878 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
7879 		QL_PRINT_9(CE_CONT, "(%d): exiting, vport_cnt=%d\n",
7880 		    ha->instance, tmp_vp.VpCnt);
7881 	}
7882 
7883 }
7884 
7885 /*
7886  * ql_vp_ioctl
7887  *	Performs all EXT_CC_VPORT_CMD functions.
7888  *
7889  * Input:
7890  *	ha:	adapter state pointer.
7891  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7892  *	mode:	flags.
7893  *
7894  * Returns:
7895  *	None, request status indicated in cmd->Status.
7896  *
7897  * Context:
7898  *	Kernel context.
7899  */
7900 static void
7901 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7902 {
7903 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance,
7904 	    cmd->SubCode);
7905 
7906 	/* case off on command subcode */
7907 	switch (cmd->SubCode) {
7908 	case EXT_VF_SC_VPORT_GETINFO:
7909 		ql_qry_vport(ha, cmd, mode);
7910 		break;
7911 	default:
7912 		/* function not supported. */
7913 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
7914 		EL(ha, "failed, Unsupported Subcode=%xh\n",
7915 		    cmd->SubCode);
7916 		break;
7917 	}
7918 
7919 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7920 }
7921 
7922 /*
7923  * ql_qry_vport
7924  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
7925  *
7926  * Input:
7927  *	ha:	adapter state pointer.
7928  *	cmd:	EXT_IOCTL cmd struct pointer.
7929  *	mode:	flags.
7930  *
7931  * Returns:
7932  *	None, request status indicated in cmd->Status.
7933  *
7934  * Context:
7935  *	Kernel context.
7936  */
7937 static void
7938 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
7939 {
7940 	ql_adapter_state_t	*tmp_vha;
7941 	EXT_VPORT_INFO		tmp_vport = {0};
7942 	int			max_vport;
7943 
7944 	QL_PRINT_9(CE_CONT, "(%d): entered\n", vha->instance);
7945 
7946 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
7947 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7948 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
7949 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
7950 		    cmd->ResponseLen);
7951 		cmd->ResponseLen = 0;
7952 		return;
7953 	}
7954 
7955 	/* Fill in the vport information. */
7956 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
7957 	    EXT_DEF_WWN_NAME_SIZE);
7958 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
7959 	    EXT_DEF_WWN_NAME_SIZE);
7960 	tmp_vport.state = vha->state;
7961 
7962 	tmp_vha = vha->pha->vp_next;
7963 	while (tmp_vha != NULL) {
7964 		tmp_vport.used++;
7965 		tmp_vha = tmp_vha->vp_next;
7966 	}
7967 
7968 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
7969 	    MAX_25_VIRTUAL_PORTS);
7970 	if (max_vport > tmp_vport.used) {
7971 		tmp_vport.free = max_vport - tmp_vport.used;
7972 	}
7973 
7974 	if (ddi_copyout((void *)&tmp_vport,
7975 	    (void *)(uintptr_t)(cmd->ResponseAdr),
7976 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
7977 		cmd->Status = EXT_STATUS_COPY_ERR;
7978 		cmd->ResponseLen = 0;
7979 		EL(vha, "failed, ddi_copyout\n");
7980 	} else {
7981 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
7982 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", vha->instance);
7983 	}
7984 }
7985