1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 #pragma ident	"Copyright 2010 QLogic Corporation; ql_xioctl.c"
29 
30 /*
31  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32  *
33  * ***********************************************************************
34  * *									**
35  * *				NOTICE					**
36  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
37  * *			ALL RIGHTS RESERVED				**
38  * *									**
39  * ***********************************************************************
40  *
41  */
42 
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_init.h>
47 #include <ql_iocb.h>
48 #include <ql_ioctl.h>
49 #include <ql_mbx.h>
50 #include <ql_xioctl.h>
51 
52 /*
53  * Local data
54  */
55 
56 /*
57  * Local prototypes
58  */
59 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
60 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
61     boolean_t (*)(EXT_IOCTL *));
62 static boolean_t ql_validate_signature(EXT_IOCTL *);
63 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
64 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
65 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static int ql_24xx_flash_desc(ql_adapter_state_t *);
99 static int ql_setup_flash(ql_adapter_state_t *);
100 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
101 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
103     uint32_t, int);
104 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
105     uint8_t);
106 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
107 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
109 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
115 static uint32_t ql_setup_led(ql_adapter_state_t *);
116 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
117 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
121 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
122 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
124 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
127 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
128 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
129 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
131 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
139 static void ql_restart_hba(ql_adapter_state_t *);
140 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
144 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
145 static void ql_update_flash_caches(ql_adapter_state_t *);
146 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
149 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
150 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t);
152 
153 /* ******************************************************************** */
154 /*			External IOCTL support.				*/
155 /* ******************************************************************** */
156 
157 /*
158  * ql_alloc_xioctl_resource
159  *	Allocates resources needed by module code.
160  *
161  * Input:
162  *	ha:		adapter state pointer.
163  *
164  * Returns:
165  *	SYS_ERRNO
166  *
167  * Context:
168  *	Kernel context.
169  */
170 int
171 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
172 {
173 	ql_xioctl_t	*xp;
174 
175 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
176 
177 	if (ha->xioctl != NULL) {
178 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
179 		    ha->instance);
180 		return (0);
181 	}
182 
183 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
184 	if (xp == NULL) {
185 		EL(ha, "failed, kmem_zalloc\n");
186 		return (ENOMEM);
187 	}
188 	ha->xioctl = xp;
189 
190 	/* Allocate AEN tracking buffer */
191 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
192 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
193 	if (xp->aen_tracking_queue == NULL) {
194 		EL(ha, "failed, kmem_zalloc-2\n");
195 		ql_free_xioctl_resource(ha);
196 		return (ENOMEM);
197 	}
198 
199 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
200 
201 	return (0);
202 }
203 
204 /*
205  * ql_free_xioctl_resource
206  *	Frees resources used by module code.
207  *
208  * Input:
209  *	ha:		adapter state pointer.
210  *
211  * Context:
212  *	Kernel context.
213  */
214 void
215 ql_free_xioctl_resource(ql_adapter_state_t *ha)
216 {
217 	ql_xioctl_t	*xp = ha->xioctl;
218 
219 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
220 
221 	if (xp == NULL) {
222 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
223 		return;
224 	}
225 
226 	if (xp->aen_tracking_queue != NULL) {
227 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
228 		    sizeof (EXT_ASYNC_EVENT));
229 		xp->aen_tracking_queue = NULL;
230 	}
231 
232 	kmem_free(xp, sizeof (ql_xioctl_t));
233 	ha->xioctl = NULL;
234 
235 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
236 }
237 
238 /*
239  * ql_xioctl
240  *	External IOCTL processing.
241  *
242  * Input:
243  *	ha:	adapter state pointer.
244  *	cmd:	function to perform
245  *	arg:	data type varies with request
246  *	mode:	flags
247  *	cred_p:	credentials pointer
248  *	rval_p:	pointer to result value
249  *
250  * Returns:
251  *	0:		success
252  *	ENXIO:		No such device or address
253  *	ENOPROTOOPT:	Protocol not available
254  *
255  * Context:
256  *	Kernel context.
257  */
/* ARGSUSED */
int
ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
    cred_t *cred_p, int *rval_p)
{
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);

	/*
	 * The xioctl context is allocated by ql_alloc_xioctl_resource();
	 * without it no external ioctl can be serviced.
	 */
	if (ha->xioctl == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return (ENXIO);
	}

	/*
	 * Every recognized external ioctl code funnels into the common
	 * SAN/Device Management handler, ql_sdm_ioctl(); anything else
	 * is rejected with ENOPROTOOPT.
	 */
	switch (cmd) {
	case EXT_CC_QUERY:
	case EXT_CC_SEND_FCCT_PASSTHRU:
	case EXT_CC_REG_AEN:
	case EXT_CC_GET_AEN:
	case EXT_CC_SEND_SCSI_PASSTHRU:
	case EXT_CC_WWPN_TO_SCSIADDR:
	case EXT_CC_SEND_ELS_RNID:
	case EXT_CC_SET_DATA:
	case EXT_CC_GET_DATA:
	case EXT_CC_HOST_IDX:
	case EXT_CC_READ_NVRAM:
	case EXT_CC_UPDATE_NVRAM:
	case EXT_CC_READ_OPTION_ROM:
	case EXT_CC_READ_OPTION_ROM_EX:
	case EXT_CC_UPDATE_OPTION_ROM:
	case EXT_CC_UPDATE_OPTION_ROM_EX:
	case EXT_CC_GET_VPD:
	case EXT_CC_SET_VPD:
	case EXT_CC_LOOPBACK:
	case EXT_CC_GET_FCACHE:
	case EXT_CC_GET_FCACHE_EX:
	case EXT_CC_HOST_DRVNAME:
	case EXT_CC_GET_SFP_DATA:
	case EXT_CC_PORT_PARAM:
	case EXT_CC_GET_PCI_DATA:
	case EXT_CC_GET_FWEXTTRACE:
	case EXT_CC_GET_FWFCETRACE:
	case EXT_CC_GET_VP_CNT_ID:
	case EXT_CC_VPORT_CMD:
	case EXT_CC_ACCESS_FLASH:
	case EXT_CC_RESET_FW:
	case EXT_CC_MENLO_MANAGE_INFO:
		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
		break;
	default:
		/* function not supported. */
		EL(ha, "function=%d not supported\n", cmd);
		rval = ENOPROTOOPT;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
317 
318 /*
319  * ql_sdm_ioctl
320  *	Provides ioctl functions for SAN/Device Management functions
321  *	AKA External Ioctl functions.
322  *
323  * Input:
324  *	ha:		adapter state pointer.
325  *	ioctl_code:	ioctl function to perform
326  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
327  *	mode:		flags
328  *
329  * Returns:
330  *	0:	success
331  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
332  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
333  *		copyout of EXT_IOCTL status info failed.
334  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
335  *	EBUSY:	Device busy
336  *
337  * Context:
338  *	Kernel context.
339  */
340 static int
341 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
342 {
343 	EXT_IOCTL		*cmd;
344 	int			rval;
345 	ql_adapter_state_t	*vha;
346 
347 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
348 
349 	/* Copy argument structure (EXT_IOCTL) from application land. */
350 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
351 	    ql_validate_signature)) != 0) {
352 		/*
353 		 * a non-zero value at this time means a problem getting
354 		 * the requested information from application land, just
355 		 * return the error code and hope for the best.
356 		 */
357 		EL(ha, "failed, sdm_setup\n");
358 		return (rval);
359 	}
360 
361 	/*
362 	 * Map the physical ha ptr (which the ioctl is called with)
363 	 * to the virtual ha that the caller is addressing.
364 	 */
365 	if (ha->flags & VP_ENABLED) {
366 		/* Check that it is within range. */
367 		if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
368 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
369 			EL(ha, "Invalid HbaSelect vp index: %xh\n",
370 			    cmd->HbaSelect);
371 			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
372 			cmd->ResponseLen = 0;
373 			return (EFAULT);
374 		}
375 		/*
376 		 * Special case: HbaSelect == 0 is physical ha
377 		 */
378 		if (cmd->HbaSelect != 0) {
379 			vha = ha->vp_next;
380 			while (vha != NULL) {
381 				if (vha->vp_index == cmd->HbaSelect) {
382 					ha = vha;
383 					break;
384 				}
385 				vha = vha->vp_next;
386 			}
387 			/*
388 			 * The specified vp index may be valid(within range)
389 			 * but it's not in the list. Currently this is all
390 			 * we can say.
391 			 */
392 			if (vha == NULL) {
393 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
394 				cmd->ResponseLen = 0;
395 				return (EFAULT);
396 			}
397 		}
398 	}
399 
400 	/*
401 	 * If driver is suspended, stalled, or powered down rtn BUSY
402 	 */
403 	if (ha->flags & ADAPTER_SUSPENDED ||
404 	    ha->task_daemon_flags & DRIVER_STALL ||
405 	    ha->power_level != PM_LEVEL_D0) {
406 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
407 		    "driver suspended" :
408 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
409 		    "FCA powered down"));
410 		cmd->Status = EXT_STATUS_BUSY;
411 		cmd->ResponseLen = 0;
412 		rval = EBUSY;
413 
414 		/* Return results to caller */
415 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
416 			EL(ha, "failed, sdm_return\n");
417 			rval = EFAULT;
418 		}
419 		return (rval);
420 	}
421 
422 	switch (ioctl_code) {
423 	case EXT_CC_QUERY_OS:
424 		ql_query(ha, cmd, mode);
425 		break;
426 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
427 		ql_fcct(ha, cmd, mode);
428 		break;
429 	case EXT_CC_REG_AEN_OS:
430 		ql_aen_reg(ha, cmd, mode);
431 		break;
432 	case EXT_CC_GET_AEN_OS:
433 		ql_aen_get(ha, cmd, mode);
434 		break;
435 	case EXT_CC_GET_DATA_OS:
436 		ql_get_host_data(ha, cmd, mode);
437 		break;
438 	case EXT_CC_SET_DATA_OS:
439 		ql_set_host_data(ha, cmd, mode);
440 		break;
441 	case EXT_CC_SEND_ELS_RNID_OS:
442 		ql_send_els_rnid(ha, cmd, mode);
443 		break;
444 	case EXT_CC_SCSI_PASSTHRU_OS:
445 		ql_scsi_passthru(ha, cmd, mode);
446 		break;
447 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
448 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
449 		break;
450 	case EXT_CC_HOST_IDX_OS:
451 		ql_host_idx(ha, cmd, mode);
452 		break;
453 	case EXT_CC_HOST_DRVNAME_OS:
454 		ql_host_drvname(ha, cmd, mode);
455 		break;
456 	case EXT_CC_READ_NVRAM_OS:
457 		ql_read_nvram(ha, cmd, mode);
458 		break;
459 	case EXT_CC_UPDATE_NVRAM_OS:
460 		ql_write_nvram(ha, cmd, mode);
461 		break;
462 	case EXT_CC_READ_OPTION_ROM_OS:
463 	case EXT_CC_READ_OPTION_ROM_EX_OS:
464 		ql_read_flash(ha, cmd, mode);
465 		break;
466 	case EXT_CC_UPDATE_OPTION_ROM_OS:
467 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
468 		ql_write_flash(ha, cmd, mode);
469 		break;
470 	case EXT_CC_LOOPBACK_OS:
471 		ql_diagnostic_loopback(ha, cmd, mode);
472 		break;
473 	case EXT_CC_GET_VPD_OS:
474 		ql_read_vpd(ha, cmd, mode);
475 		break;
476 	case EXT_CC_SET_VPD_OS:
477 		ql_write_vpd(ha, cmd, mode);
478 		break;
479 	case EXT_CC_GET_FCACHE_OS:
480 		ql_get_fcache(ha, cmd, mode);
481 		break;
482 	case EXT_CC_GET_FCACHE_EX_OS:
483 		ql_get_fcache_ex(ha, cmd, mode);
484 		break;
485 	case EXT_CC_GET_SFP_DATA_OS:
486 		ql_get_sfp(ha, cmd, mode);
487 		break;
488 	case EXT_CC_PORT_PARAM_OS:
489 		ql_port_param(ha, cmd, mode);
490 		break;
491 	case EXT_CC_GET_PCI_DATA_OS:
492 		ql_get_pci_data(ha, cmd, mode);
493 		break;
494 	case EXT_CC_GET_FWEXTTRACE_OS:
495 		ql_get_fwexttrace(ha, cmd, mode);
496 		break;
497 	case EXT_CC_GET_FWFCETRACE_OS:
498 		ql_get_fwfcetrace(ha, cmd, mode);
499 		break;
500 	case EXT_CC_MENLO_RESET:
501 		ql_menlo_reset(ha, cmd, mode);
502 		break;
503 	case EXT_CC_MENLO_GET_FW_VERSION:
504 		ql_menlo_get_fw_version(ha, cmd, mode);
505 		break;
506 	case EXT_CC_MENLO_UPDATE_FW:
507 		ql_menlo_update_fw(ha, cmd, mode);
508 		break;
509 	case EXT_CC_MENLO_MANAGE_INFO:
510 		ql_menlo_manage_info(ha, cmd, mode);
511 		break;
512 	case EXT_CC_GET_VP_CNT_ID_OS:
513 		ql_get_vp_cnt_id(ha, cmd, mode);
514 		break;
515 	case EXT_CC_VPORT_CMD_OS:
516 		ql_vp_ioctl(ha, cmd, mode);
517 		break;
518 	case EXT_CC_ACCESS_FLASH_OS:
519 		ql_access_flash(ha, cmd, mode);
520 		break;
521 	case EXT_CC_RESET_FW_OS:
522 		ql_reset_cmd(ha, cmd);
523 		break;
524 	default:
525 		/* function not supported. */
526 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
527 
528 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
529 		cmd->ResponseLen = 0;
530 		break;
531 	}
532 
533 	/* Return results to caller */
534 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
535 		EL(ha, "failed, sdm_return\n");
536 		return (EFAULT);
537 	}
538 
539 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
540 
541 	return (0);
542 }
543 
544 /*
545  * ql_sdm_setup
546  *	Make a local copy of the EXT_IOCTL struct and validate it.
547  *
548  * Input:
549  *	ha:		adapter state pointer.
550  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
551  *	arg:		Address of application EXT_IOCTL cmd data
552  *	mode:		flags
553  *	val_sig:	Pointer to a function to validate the ioctl signature.
554  *
555  * Returns:
556  *	0:		success
557  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
558  *	EINVAL:		Invalid version, signature.
559  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
560  *
561  * Context:
562  *	Kernel context.
563  */
564 static int
565 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
566     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
567 {
568 	int		rval;
569 	EXT_IOCTL	*cmd;
570 
571 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
572 
573 	/* Allocate local memory for EXT_IOCTL. */
574 	*cmd_struct = NULL;
575 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
576 	if (cmd == NULL) {
577 		EL(ha, "failed, kmem_zalloc\n");
578 		return (ENOMEM);
579 	}
580 	/* Get argument structure. */
581 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
582 	if (rval != 0) {
583 		EL(ha, "failed, ddi_copyin\n");
584 		rval = EFAULT;
585 	} else {
586 		/*
587 		 * Check signature and the version.
588 		 * If either are not valid then neither is the
589 		 * structure so don't attempt to return any error status
590 		 * because we can't trust what caller's arg points to.
591 		 * Just return the errno.
592 		 */
593 		if (val_sig(cmd) == 0) {
594 			EL(ha, "failed, signature\n");
595 			rval = EINVAL;
596 		} else if (cmd->Version > EXT_VERSION) {
597 			EL(ha, "failed, version\n");
598 			rval = EINVAL;
599 		}
600 	}
601 
602 	if (rval == 0) {
603 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
604 		*cmd_struct = cmd;
605 		cmd->Status = EXT_STATUS_OK;
606 		cmd->DetailStatus = 0;
607 	} else {
608 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
609 	}
610 
611 	return (rval);
612 }
613 
614 /*
615  * ql_validate_signature
616  *	Validate the signature string for an external ioctl call.
617  *
618  * Input:
619  *	sg:	Pointer to EXT_IOCTL signature to validate.
620  *
621  * Returns:
622  *	B_TRUE:		Signature is valid.
623  *	B_FALSE:	Signature is NOT valid.
624  *
625  * Context:
626  *	Kernel context.
627  */
628 static boolean_t
629 ql_validate_signature(EXT_IOCTL *cmd_struct)
630 {
631 	/*
632 	 * Check signature.
633 	 *
634 	 * If signature is not valid then neither is the rest of
635 	 * the structure (e.g., can't trust it), so don't attempt
636 	 * to return any error status other than the errno.
637 	 */
638 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
639 		QL_PRINT_2(CE_CONT, "failed,\n");
640 		return (B_FALSE);
641 	}
642 
643 	return (B_TRUE);
644 }
645 
646 /*
647  * ql_sdm_return
648  *	Copies return data/status to application land for
649  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
650  *
651  * Input:
652  *	ha:		adapter state pointer.
653  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
654  *	ioctl_code:	ioctl function to perform
655  *	arg:		EXT_IOCTL cmd data in application land.
656  *	mode:		flags
657  *
658  * Returns:
659  *	0:	success
660  *	EFAULT:	Copy out error.
661  *
662  * Context:
663  *	Kernel context.
664  */
665 /* ARGSUSED */
666 static int
667 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
668 {
669 	int	rval = 0;
670 
671 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
672 
673 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
674 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
675 	    mode);
676 
677 	rval |= ddi_copyout((void *)&cmd->Status,
678 	    (void *)&(((EXT_IOCTL*)arg)->Status),
679 	    sizeof (cmd->Status), mode);
680 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
681 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
682 	    sizeof (cmd->DetailStatus), mode);
683 
684 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
685 
686 	if (rval != 0) {
687 		/* Some copyout operation failed */
688 		EL(ha, "failed, ddi_copyout\n");
689 		return (EFAULT);
690 	}
691 
692 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
693 
694 	return (0);
695 }
696 
697 /*
698  * ql_query
699  *	Performs all EXT_CC_QUERY functions.
700  *
701  * Input:
702  *	ha:	adapter state pointer.
703  *	cmd:	Local EXT_IOCTL cmd struct pointer.
704  *	mode:	flags.
705  *
706  * Returns:
707  *	None, request status indicated in cmd->Status.
708  *
709  * Context:
710  *	Kernel context.
711  */
static void
ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
	    cmd->SubCode);

	/*
	 * Dispatch on the query subcode; each handler fills in
	 * cmd->Status (and cmd->ResponseLen) itself.
	 */
	switch (cmd->SubCode) {
	case EXT_SC_QUERY_HBA_NODE:
		ql_qry_hba_node(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_HBA_PORT:
		ql_qry_hba_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_PORT:
		ql_qry_disc_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_TGT:
		ql_qry_disc_tgt(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DRIVER:
		ql_qry_driver(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_FW:
		ql_qry_fw(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_CHIP:
		ql_qry_chip(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_CNA_PORT:
		ql_qry_cna_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_ADAPTER_VERSIONS:
		ql_qry_adapter_versions(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_LUN:
	/* EXT_SC_QUERY_DISC_LUN deliberately falls into default. */
	default:
		/* function not supported. */
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		EL(ha, "failed, Unsupported Subcode=%xh\n",
		    cmd->SubCode);
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
758 
759 /*
760  * ql_qry_hba_node
761  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
762  *
763  * Input:
764  *	ha:	adapter state pointer.
765  *	cmd:	EXT_IOCTL cmd struct pointer.
766  *	mode:	flags.
767  *
768  * Returns:
769  *	None, request status indicated in cmd->Status.
770  *
771  * Context:
772  *	Kernel context.
773  */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must be able to hold a full EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* The last three bytes of the node WWN serve as the serial number. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	/* SBUS cards: append the FPGA revision after the driver version. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			/* Writes "maj.min" just past the version's NUL. */
			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			/*
			 * NOTE(review): the address of an array element can
			 * never be NULL, so this check is dead code.
			 */
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}

	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/* Pre-24xx chips: tag the firmware version with its attribute. */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		switch (ha->fw_attributes) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			/*
			 * NOTE(review): " FLX" for FWATTRIB_FPX looks like
			 * it may have been intended to be " FPX" - confirm
			 * against the firmware attribute naming before
			 * changing; this matches the shipped behavior.
			 */
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
			break;
		}
	}

	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate to fit and force NUL termination. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		/* ddi_getlongprop() allocated bufp; release it. */
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	/* Hand the populated node info back to the application buffer. */
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
889 
890 /*
891  * ql_qry_hba_port
892  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
893  *
894  * Input:
895  *	ha:	adapter state pointer.
896  *	cmd:	EXT_IOCTL cmd struct pointer.
897  *	mode:	flags.
898  *
899  * Returns:
900  *	None, request status indicated in cmd->Status.
901  *
902  * Context:
903  *	Kernel context.
904  */
905 static void
906 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
907 {
908 	ql_link_t	*link;
909 	ql_tgt_t	*tq;
910 	ql_mbx_data_t	mr;
911 	EXT_HBA_PORT	tmp_port = {0};
912 	int		rval;
913 	uint16_t	port_cnt, tgt_cnt, index;
914 
915 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
916 
917 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
918 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
919 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
920 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
921 		    cmd->ResponseLen);
922 		cmd->ResponseLen = 0;
923 		return;
924 	}
925 
926 	/* fill in the values */
927 
928 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
929 	    EXT_DEF_WWN_NAME_SIZE);
930 	tmp_port.Id[0] = 0;
931 	tmp_port.Id[1] = ha->d_id.b.domain;
932 	tmp_port.Id[2] = ha->d_id.b.area;
933 	tmp_port.Id[3] = ha->d_id.b.al_pa;
934 
935 	/* For now we are initiator only driver */
936 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
937 
938 	if (ha->task_daemon_flags & LOOP_DOWN) {
939 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
940 	} else if (DRIVER_SUSPENDED(ha)) {
941 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
942 	} else {
943 		tmp_port.State = EXT_DEF_HBA_OK;
944 	}
945 
946 	if (ha->flags & POINT_TO_POINT) {
947 		tmp_port.Mode = EXT_DEF_P2P_MODE;
948 	} else {
949 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
950 	}
951 	/*
952 	 * fill in the portspeed values.
953 	 *
954 	 * default to not yet negotiated state
955 	 */
956 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
957 
958 	if (tmp_port.State == EXT_DEF_HBA_OK) {
959 		switch (ha->iidma_rate) {
960 		case IIDMA_RATE_1GB:
961 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
962 			break;
963 		case IIDMA_RATE_2GB:
964 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
965 			break;
966 		case IIDMA_RATE_4GB:
967 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
968 			break;
969 		case IIDMA_RATE_8GB:
970 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
971 			break;
972 		case IIDMA_RATE_10GB:
973 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
974 			break;
975 		default:
976 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
977 			EL(ha, "failed, data rate=%xh\n", mr.mb[1]);
978 			break;
979 		}
980 	}
981 
982 	/* Report all supported port speeds */
983 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
984 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
985 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
986 		    EXT_DEF_PORTSPEED_1GBIT);
987 		/*
988 		 * Correct supported speeds based on type of
989 		 * sfp that is present
990 		 */
991 		switch (ha->sfp_stat) {
992 		case 1:
993 			/* no sfp detected */
994 			break;
995 		case 2:
996 		case 4:
997 			/* 4GB sfp */
998 			tmp_port.PortSupportedSpeed &=
999 			    ~EXT_DEF_PORTSPEED_8GBIT;
1000 			break;
1001 		case 3:
1002 		case 5:
1003 			/* 8GB sfp */
1004 			tmp_port.PortSupportedSpeed &=
1005 			    ~EXT_DEF_PORTSPEED_1GBIT;
1006 			break;
1007 		default:
1008 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1009 			break;
1010 
1011 		}
1012 	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
1013 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1014 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1015 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1016 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1017 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1018 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1019 		    EXT_DEF_PORTSPEED_1GBIT);
1020 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1021 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1022 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1023 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1024 	} else {
1025 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1026 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1027 	}
1028 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1029 	port_cnt = 0;
1030 	tgt_cnt = 0;
1031 
1032 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1033 		for (link = ha->dev[index].first; link != NULL;
1034 		    link = link->next) {
1035 			tq = link->base_address;
1036 
1037 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1038 				continue;
1039 			}
1040 
1041 			port_cnt++;
1042 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1043 				tgt_cnt++;
1044 			}
1045 		}
1046 	}
1047 
1048 	tmp_port.DiscPortCount = port_cnt;
1049 	tmp_port.DiscTargetCount = tgt_cnt;
1050 
1051 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1052 
1053 	rval = ddi_copyout((void *)&tmp_port,
1054 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1055 	    sizeof (EXT_HBA_PORT), mode);
1056 	if (rval != 0) {
1057 		cmd->Status = EXT_STATUS_COPY_ERR;
1058 		cmd->ResponseLen = 0;
1059 		EL(ha, "failed, ddi_copyout\n");
1060 	} else {
1061 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1062 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1063 		    ha->instance, port_cnt, tgt_cnt);
1064 	}
1065 }
1066 
1067 /*
1068  * ql_qry_disc_port
1069  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1070  *
1071  * Input:
1072  *	ha:	adapter state pointer.
1073  *	cmd:	EXT_IOCTL cmd struct pointer.
1074  *	mode:	flags.
1075  *
1076  *	cmd->Instance = Port instance in fcport chain.
1077  *
1078  * Returns:
1079  *	None, request status indicated in cmd->Status.
1080  *
1081  * Context:
1082  *	Kernel context.
1083  */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The caller's response buffer must hold a full EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk the device hash buckets counting valid targets until the
	 * cmd->Instance'th one is found.  The outer loop's "link == NULL"
	 * condition means the break below (which leaves link non-NULL)
	 * terminates both loops.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Skip entries without a usable loop ID. */
			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			/* Not the requested instance yet; keep counting. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	/* link is NULL only if the scan ran off the end of every bucket. */
	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}

	/* 24-bit FC port ID: domain/area/AL_PA; byte 0 is always zero. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = tq->d_id.b.domain;
	tmp_port.Id[2] = tq->d_id.b.area;
	tmp_port.Id[3] = tq->d_id.b.al_pa;

	tmp_port.Type = 0;
	if (tq->flags & TQF_INITIATOR_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type |
		    EXT_DEF_INITIATOR_DEV);
	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		/*
		 * Device type not determined yet; run an inquiry scan,
		 * which may set TQF_TAPE_DEVICE on the queue.
		 * NOTE(review): Type is not re-derived after the scan on
		 * this pass -- confirm that is intentional.
		 */
		(void) ql_inq_scan(ha, tq, 1);
	} else if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;  /* Hard-coded for Solaris */

	/* TargetId is reported as the 8-byte port WWN. */
	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1173 
1174 /*
1175  * ql_qry_disc_tgt
1176  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1177  *
1178  * Input:
1179  *	ha:		adapter state pointer.
1180  *	cmd:		EXT_IOCTL cmd struct pointer.
1181  *	mode:		flags.
1182  *
1183  *	cmd->Instance = Port instance in fcport chain.
1184  *
1185  * Returns:
1186  *	None, request status indicated in cmd->Status.
1187  *
1188  * Context:
1189  *	Kernel context.
1190  */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
	    cmd->Instance);

	/* The caller's response buffer must hold a full EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	/*
	 * Same sentinel pattern as ql_qry_disc_port: the outer loop's
	 * "link == NULL" condition ends both loops once the inner break
	 * leaves link non-NULL.  Initiator devices are not counted as
	 * targets here.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			/* Not the requested instance yet; keep counting. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		EL(ha, "failed, not found target=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}
	/* 24-bit FC port ID: domain/area/AL_PA; byte 0 is always zero. */
	tmp_tgt.Id[0] = 0;
	tmp_tgt.Id[1] = tq->d_id.b.domain;
	tmp_tgt.Id[2] = tq->d_id.b.area;
	tmp_tgt.Id[3] = tq->d_id.b.al_pa;

	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);

	/*
	 * If the device type is not yet known, probe it; the scan may set
	 * TQF_TAPE_DEVICE, which IS re-checked below (unlike the
	 * equivalent path in ql_qry_disc_port).
	 */
	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	}

	tmp_tgt.Type = 0;
	if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */

	/* TargetId is reported as the 8-byte port WWN. */
	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1285 
1286 /*
1287  * ql_qry_fw
1288  *	Performs EXT_SC_QUERY_FW subfunction.
1289  *
1290  * Input:
1291  *	ha:	adapter state pointer.
1292  *	cmd:	EXT_IOCTL cmd struct pointer.
1293  *	mode:	flags.
1294  *
1295  * Returns:
1296  *	None, request status indicated in cmd->Status.
1297  *
1298  * Context:
1299  *	Kernel context.
1300  */
1301 static void
1302 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1303 {
1304 	EXT_FW		fw_info = {0};
1305 
1306 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1307 
1308 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1309 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1310 		cmd->DetailStatus = sizeof (EXT_FW);
1311 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1312 		    cmd->ResponseLen);
1313 		cmd->ResponseLen = 0;
1314 		return;
1315 	}
1316 
1317 	(void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1318 	    ha->fw_major_version, ha->fw_minor_version,
1319 	    ha->fw_subminor_version);
1320 
1321 	fw_info.Attrib = ha->fw_attributes;
1322 
1323 	if (ddi_copyout((void *)&fw_info,
1324 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1325 	    sizeof (EXT_FW), mode) != 0) {
1326 		cmd->Status = EXT_STATUS_COPY_ERR;
1327 		cmd->ResponseLen = 0;
1328 		EL(ha, "failed, ddi_copyout\n");
1329 		return;
1330 	} else {
1331 		cmd->ResponseLen = sizeof (EXT_FW);
1332 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1333 	}
1334 }
1335 
1336 /*
1337  * ql_qry_chip
1338  *	Performs EXT_SC_QUERY_CHIP subfunction.
1339  *
1340  * Input:
1341  *	ha:	adapter state pointer.
1342  *	cmd:	EXT_IOCTL cmd struct pointer.
1343  *	mode:	flags.
1344  *
1345  * Returns:
1346  *	None, request status indicated in cmd->Status.
1347  *
1348  * Context:
1349  *	Kernel context.
1350  */
1351 static void
1352 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1353 {
1354 	EXT_CHIP	chip = {0};
1355 
1356 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1357 
1358 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1359 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1360 		cmd->DetailStatus = sizeof (EXT_CHIP);
1361 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1362 		    cmd->ResponseLen);
1363 		cmd->ResponseLen = 0;
1364 		return;
1365 	}
1366 
1367 	chip.VendorId = ha->ven_id;
1368 	chip.DeviceId = ha->device_id;
1369 	chip.SubVendorId = ha->subven_id;
1370 	chip.SubSystemId = ha->subsys_id;
1371 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1372 	chip.IoAddrLen = 0x100;
1373 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1374 	chip.MemAddrLen = 0x100;
1375 	chip.ChipRevID = ha->rev_id;
1376 	if (ha->flags & FUNCTION_1) {
1377 		chip.FuncNo = 1;
1378 	}
1379 
1380 	if (ddi_copyout((void *)&chip,
1381 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1382 	    sizeof (EXT_CHIP), mode) != 0) {
1383 		cmd->Status = EXT_STATUS_COPY_ERR;
1384 		cmd->ResponseLen = 0;
1385 		EL(ha, "failed, ddi_copyout\n");
1386 	} else {
1387 		cmd->ResponseLen = sizeof (EXT_CHIP);
1388 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1389 	}
1390 }
1391 
1392 /*
1393  * ql_qry_driver
1394  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1395  *
1396  * Input:
1397  *	ha:	adapter state pointer.
1398  *	cmd:	EXT_IOCTL cmd struct pointer.
1399  *	mode:	flags.
1400  *
1401  * Returns:
1402  *	None, request status indicated in cmd->Status.
1403  *
1404  * Context:
1405  *	Kernel context.
1406  */
1407 static void
1408 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1409 {
1410 	EXT_DRIVER	qd = {0};
1411 
1412 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1413 
1414 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1415 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1416 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1417 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1418 		    cmd->ResponseLen);
1419 		cmd->ResponseLen = 0;
1420 		return;
1421 	}
1422 
1423 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1424 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1425 	qd.TargetsPerBus = (uint16_t)
1426 	    (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1427 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1428 	qd.LunsPerTarget = 2030;
1429 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1430 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1431 
1432 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1433 	    sizeof (EXT_DRIVER), mode) != 0) {
1434 		cmd->Status = EXT_STATUS_COPY_ERR;
1435 		cmd->ResponseLen = 0;
1436 		EL(ha, "failed, ddi_copyout\n");
1437 	} else {
1438 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1439 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1440 	}
1441 }
1442 
1443 /*
1444  * ql_fcct
1445  *	IOCTL management server FC-CT passthrough.
1446  *
1447  * Input:
1448  *	ha:	adapter state pointer.
1449  *	cmd:	User space CT arguments pointer.
1450  *	mode:	flags.
1451  *
1452  * Returns:
1453  *	None, request status indicated in cmd->Status.
1454  *
1455  * Context:
1456  *	Kernel context.
1457  */
1458 static void
1459 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1460 {
1461 	ql_mbx_iocb_t		*pkt;
1462 	ql_mbx_data_t		mr;
1463 	dma_mem_t		*dma_mem;
1464 	caddr_t			pld;
1465 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1466 	int			rval;
1467 	ql_ct_iu_preamble_t	*ct;
1468 	ql_xioctl_t		*xp = ha->xioctl;
1469 	ql_tgt_t		tq;
1470 	uint16_t		comp_status, loop_id;
1471 
1472 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1473 
1474 	/* Get CT argument structure. */
1475 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1476 		EL(ha, "failed, No switch\n");
1477 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1478 		cmd->ResponseLen = 0;
1479 		return;
1480 	}
1481 
1482 	if (DRIVER_SUSPENDED(ha)) {
1483 		EL(ha, "failed, LOOP_NOT_READY\n");
1484 		cmd->Status = EXT_STATUS_BUSY;
1485 		cmd->ResponseLen = 0;
1486 		return;
1487 	}
1488 
1489 	/* Login management server device. */
1490 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1491 		tq.d_id.b.al_pa = 0xfa;
1492 		tq.d_id.b.area = 0xff;
1493 		tq.d_id.b.domain = 0xff;
1494 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1495 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1496 		    MANAGEMENT_SERVER_LOOP_ID);
1497 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1498 		if (rval != QL_SUCCESS) {
1499 			EL(ha, "failed, server login\n");
1500 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1501 			cmd->ResponseLen = 0;
1502 			return;
1503 		} else {
1504 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1505 		}
1506 	}
1507 
1508 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1509 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1510 
1511 	/* Allocate a DMA Memory Descriptor */
1512 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1513 	if (dma_mem == NULL) {
1514 		EL(ha, "failed, kmem_zalloc\n");
1515 		cmd->Status = EXT_STATUS_NO_MEMORY;
1516 		cmd->ResponseLen = 0;
1517 		return;
1518 	}
1519 	/* Determine maximum buffer size. */
1520 	if (cmd->RequestLen < cmd->ResponseLen) {
1521 		pld_byte_cnt = cmd->ResponseLen;
1522 	} else {
1523 		pld_byte_cnt = cmd->RequestLen;
1524 	}
1525 
1526 	/* Allocate command block. */
1527 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1528 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1529 	if (pkt == NULL) {
1530 		EL(ha, "failed, kmem_zalloc\n");
1531 		cmd->Status = EXT_STATUS_NO_MEMORY;
1532 		cmd->ResponseLen = 0;
1533 		return;
1534 	}
1535 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1536 
1537 	/* Get command payload data. */
1538 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1539 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1540 		EL(ha, "failed, get_buffer_data\n");
1541 		kmem_free(pkt, pkt_size);
1542 		cmd->Status = EXT_STATUS_COPY_ERR;
1543 		cmd->ResponseLen = 0;
1544 		return;
1545 	}
1546 
1547 	/* Get DMA memory for the IOCB */
1548 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1549 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1550 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1551 		    "alloc failed", QL_NAME, ha->instance);
1552 		kmem_free(pkt, pkt_size);
1553 		kmem_free(dma_mem, sizeof (dma_mem_t));
1554 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1555 		cmd->ResponseLen = 0;
1556 		return;
1557 	}
1558 
1559 	/* Copy out going payload data to IOCB DMA buffer. */
1560 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1561 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1562 
1563 	/* Sync IOCB DMA buffer. */
1564 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1565 	    DDI_DMA_SYNC_FORDEV);
1566 
1567 	/*
1568 	 * Setup IOCB
1569 	 */
1570 	ct = (ql_ct_iu_preamble_t *)pld;
1571 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1572 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1573 		pkt->ms24.entry_count = 1;
1574 
1575 		pkt->ms24.vp_index = ha->vp_index;
1576 
1577 		/* Set loop ID */
1578 		pkt->ms24.n_port_hdl = (uint16_t)
1579 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1580 		    LE_16(SNS_24XX_HDL) :
1581 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1582 
1583 		/* Set ISP command timeout. */
1584 		pkt->ms24.timeout = LE_16(120);
1585 
1586 		/* Set cmd/response data segment counts. */
1587 		pkt->ms24.cmd_dseg_count = LE_16(1);
1588 		pkt->ms24.resp_dseg_count = LE_16(1);
1589 
1590 		/* Load ct cmd byte count. */
1591 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1592 
1593 		/* Load ct rsp byte count. */
1594 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1595 
1596 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1597 
1598 		/* Load MS command entry data segments. */
1599 		*long_ptr++ = (uint32_t)
1600 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1601 		*long_ptr++ = (uint32_t)
1602 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1603 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1604 
1605 		/* Load MS response entry data segments. */
1606 		*long_ptr++ = (uint32_t)
1607 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1608 		*long_ptr++ = (uint32_t)
1609 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1610 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1611 
1612 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1613 		    sizeof (ql_mbx_iocb_t));
1614 
1615 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1616 		if (comp_status == CS_DATA_UNDERRUN) {
1617 			if ((BE_16(ct->max_residual_size)) == 0) {
1618 				comp_status = CS_COMPLETE;
1619 			}
1620 		}
1621 
1622 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1623 		    0) {
1624 			EL(ha, "failed, I/O timeout or "
1625 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1626 			    pkt->sts24.entry_status,
1627 			    pkt->sts24.scsi_status_l, rval);
1628 			kmem_free(pkt, pkt_size);
1629 			ql_free_dma_resource(ha, dma_mem);
1630 			kmem_free(dma_mem, sizeof (dma_mem_t));
1631 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1632 			cmd->ResponseLen = 0;
1633 			return;
1634 		}
1635 	} else {
1636 		pkt->ms.entry_type = MS_TYPE;
1637 		pkt->ms.entry_count = 1;
1638 
1639 		/* Set loop ID */
1640 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1641 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1642 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1643 			pkt->ms.loop_id_l = LSB(loop_id);
1644 			pkt->ms.loop_id_h = MSB(loop_id);
1645 		} else {
1646 			pkt->ms.loop_id_h = LSB(loop_id);
1647 		}
1648 
1649 		/* Set ISP command timeout. */
1650 		pkt->ms.timeout = LE_16(120);
1651 
1652 		/* Set data segment counts. */
1653 		pkt->ms.cmd_dseg_count_l = 1;
1654 		pkt->ms.total_dseg_count = LE_16(2);
1655 
1656 		/* Response total byte count. */
1657 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1658 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1659 
1660 		/* Command total byte count. */
1661 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1662 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1663 
1664 		/* Load command/response data segments. */
1665 		pkt->ms.dseg_0_address[0] = (uint32_t)
1666 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1667 		pkt->ms.dseg_0_address[1] = (uint32_t)
1668 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1669 		pkt->ms.dseg_1_address[0] = (uint32_t)
1670 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1671 		pkt->ms.dseg_1_address[1] = (uint32_t)
1672 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1673 
1674 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1675 		    sizeof (ql_mbx_iocb_t));
1676 
1677 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1678 		if (comp_status == CS_DATA_UNDERRUN) {
1679 			if ((BE_16(ct->max_residual_size)) == 0) {
1680 				comp_status = CS_COMPLETE;
1681 			}
1682 		}
1683 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1684 			EL(ha, "failed, I/O timeout or "
1685 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1686 			kmem_free(pkt, pkt_size);
1687 			ql_free_dma_resource(ha, dma_mem);
1688 			kmem_free(dma_mem, sizeof (dma_mem_t));
1689 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1690 			cmd->ResponseLen = 0;
1691 			return;
1692 		}
1693 	}
1694 
1695 	/* Sync in coming DMA buffer. */
1696 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1697 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1698 	/* Copy in coming DMA data. */
1699 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1700 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1701 	    DDI_DEV_AUTOINCR);
1702 
1703 	/* Copy response payload from DMA buffer to application. */
1704 	if (cmd->ResponseLen != 0) {
1705 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1706 		    cmd->ResponseLen);
1707 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1708 
1709 		/* Send response payload. */
1710 		if (ql_send_buffer_data(pld,
1711 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1712 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1713 			EL(ha, "failed, send_buffer_data\n");
1714 			cmd->Status = EXT_STATUS_COPY_ERR;
1715 			cmd->ResponseLen = 0;
1716 		}
1717 	}
1718 
1719 	kmem_free(pkt, pkt_size);
1720 	ql_free_dma_resource(ha, dma_mem);
1721 	kmem_free(dma_mem, sizeof (dma_mem_t));
1722 
1723 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1724 }
1725 
1726 /*
1727  * ql_aen_reg
1728  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1729  *
1730  * Input:
1731  *	ha:	adapter state pointer.
1732  *	cmd:	EXT_IOCTL cmd struct pointer.
1733  *	mode:	flags.
1734  *
1735  * Returns:
1736  *	None, request status indicated in cmd->Status.
1737  *
1738  * Context:
1739  *	Kernel context.
1740  */
1741 static void
1742 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1743 {
1744 	EXT_REG_AEN	reg_struct;
1745 	int		rval = 0;
1746 	ql_xioctl_t	*xp = ha->xioctl;
1747 
1748 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1749 
1750 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1751 	    cmd->RequestLen, mode);
1752 
1753 	if (rval == 0) {
1754 		if (reg_struct.Enable) {
1755 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1756 		} else {
1757 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1758 			/* Empty the queue. */
1759 			INTR_LOCK(ha);
1760 			xp->aen_q_head = 0;
1761 			xp->aen_q_tail = 0;
1762 			INTR_UNLOCK(ha);
1763 		}
1764 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1765 	} else {
1766 		cmd->Status = EXT_STATUS_COPY_ERR;
1767 		EL(ha, "failed, ddi_copyin\n");
1768 	}
1769 }
1770 
1771 /*
1772  * ql_aen_get
1773  *	IOCTL management server Asynchronous Event Record Transfer.
1774  *
1775  * Input:
1776  *	ha:	adapter state pointer.
1777  *	cmd:	EXT_IOCTL cmd struct pointer.
1778  *	mode:	flags.
1779  *
1780  * Returns:
1781  *	None, request status indicated in cmd->Status.
1782  *
1783  * Context:
1784  *	Kernel context.
1785  */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* The caller must be able to accept the whole queue in one call. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/*
	 * Drain under INTR_LOCK so the interrupt-side producer
	 * (ql_enqueue_aen) cannot modify the ring while we copy.
	 */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	/*
	 * Walk from head to tail, compacting occupied slots (non-zero
	 * AsyncEventCode) into aen[] and clearing them as we go.
	 */
	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		/* Wrap around the circular buffer. */
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			i = 0;
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		/* Nothing queued; report zero-length success. */
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1857 
1858 /*
1859  * ql_enqueue_aen
1860  *
1861  * Input:
1862  *	ha:		adapter state pointer.
1863  *	event_code:	async event code of the event to add to queue.
1864  *	payload:	event payload for the queue.
1865  *	INTR_LOCK must be already obtained.
1866  *
1867  * Context:
1868  *	Interrupt or Kernel context, no mailbox commands allowed.
1869  */
1870 void
1871 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1872 {
1873 	uint8_t			new_entry;	/* index to current entry */
1874 	uint16_t		*mbx;
1875 	EXT_ASYNC_EVENT		*aen_queue;
1876 	ql_xioctl_t		*xp = ha->xioctl;
1877 
1878 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1879 	    event_code);
1880 
1881 	if (xp == NULL) {
1882 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1883 		return;
1884 	}
1885 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1886 
1887 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1888 		/* Need to change queue pointers to make room. */
1889 
1890 		/* Increment tail for adding new entry. */
1891 		xp->aen_q_tail++;
1892 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1893 			xp->aen_q_tail = 0;
1894 		}
1895 		if (xp->aen_q_head == xp->aen_q_tail) {
1896 			/*
1897 			 * We're overwriting the oldest entry, so need to
1898 			 * update the head pointer.
1899 			 */
1900 			xp->aen_q_head++;
1901 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1902 				xp->aen_q_head = 0;
1903 			}
1904 		}
1905 	}
1906 
1907 	new_entry = xp->aen_q_tail;
1908 	aen_queue[new_entry].AsyncEventCode = event_code;
1909 
1910 	/* Update payload */
1911 	if (payload != NULL) {
1912 		switch (event_code) {
1913 		case MBA_LIP_OCCURRED:
1914 		case MBA_LOOP_UP:
1915 		case MBA_LOOP_DOWN:
1916 		case MBA_LIP_F8:
1917 		case MBA_LIP_RESET:
1918 		case MBA_PORT_UPDATE:
1919 			break;
1920 		case MBA_RSCN_UPDATE:
1921 			mbx = (uint16_t *)payload;
1922 			/* al_pa */
1923 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1924 			    LSB(mbx[2]);
1925 			/* area */
1926 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1927 			    MSB(mbx[2]);
1928 			/* domain */
1929 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1930 			    LSB(mbx[1]);
1931 			/* save in big endian */
1932 			BIG_ENDIAN_24(&aen_queue[new_entry].
1933 			    Payload.RSCN.RSCNInfo[0]);
1934 
1935 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1936 			    MSB(mbx[1]);
1937 
1938 			break;
1939 		default:
1940 			/* Not supported */
1941 			EL(ha, "failed, event code not supported=%xh\n",
1942 			    event_code);
1943 			aen_queue[new_entry].AsyncEventCode = 0;
1944 			break;
1945 		}
1946 	}
1947 
1948 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1949 }
1950 
1951 /*
1952  * ql_scsi_passthru
1953  *	IOCTL SCSI passthrough.
1954  *
1955  * Input:
1956  *	ha:	adapter state pointer.
1957  *	cmd:	User space SCSI command pointer.
1958  *	mode:	flags.
1959  *
1960  * Returns:
1961  *	None, request status indicated in cmd->Status.
1962  *
1963  * Context:
1964  *	Kernel context.
1965  */
1966 static void
1967 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1968 {
1969 	ql_mbx_iocb_t		*pkt;
1970 	ql_mbx_data_t		mr;
1971 	dma_mem_t		*dma_mem;
1972 	caddr_t			pld;
1973 	uint32_t		pkt_size, pld_size;
1974 	uint16_t		qlnt, retries, cnt, cnt2;
1975 	uint8_t			*name;
1976 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1977 	EXT_SCSI_PASSTHRU	*usp_req;
1978 	int			rval;
1979 	union _passthru {
1980 		EXT_SCSI_PASSTHRU	sp_cmd;
1981 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1982 	} pt_req;		/* Passthru request */
1983 	uint32_t		status, sense_sz = 0;
1984 	ql_tgt_t		*tq = NULL;
1985 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1986 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1987 
1988 	/* SCSI request struct for SCSI passthrough IOs. */
1989 	struct {
1990 		uint16_t	lun;
1991 		uint16_t	sense_length;	/* Sense buffer size */
1992 		size_t		resid;		/* Residual */
1993 		uint8_t		*cdbp;		/* Requestor's CDB */
1994 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1995 		uint8_t		cdb_len;	/* Requestor's CDB length */
1996 		uint8_t		direction;
1997 	} scsi_req;
1998 
1999 	struct {
2000 		uint8_t		*rsp_info;
2001 		uint8_t		*req_sense_data;
2002 		uint32_t	residual_length;
2003 		uint32_t	rsp_info_length;
2004 		uint32_t	req_sense_length;
2005 		uint16_t	comp_status;
2006 		uint8_t		state_flags_l;
2007 		uint8_t		state_flags_h;
2008 		uint8_t		scsi_status_l;
2009 		uint8_t		scsi_status_h;
2010 	} sts;
2011 
2012 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2013 
2014 	/* Verify Sub Code and set cnt to needed request size. */
2015 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2016 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2017 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2018 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2019 	} else {
2020 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2021 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2022 		cmd->ResponseLen = 0;
2023 		return;
2024 	}
2025 
2026 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2027 	if (dma_mem == NULL) {
2028 		EL(ha, "failed, kmem_zalloc\n");
2029 		cmd->Status = EXT_STATUS_NO_MEMORY;
2030 		cmd->ResponseLen = 0;
2031 		return;
2032 	}
2033 	/*  Verify the size of and copy in the passthru request structure. */
2034 	if (cmd->RequestLen != pld_size) {
2035 		/* Return error */
2036 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2037 		    cmd->RequestLen, pld_size);
2038 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2039 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2040 		cmd->ResponseLen = 0;
2041 		return;
2042 	}
2043 
2044 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2045 	    pld_size, mode) != 0) {
2046 		EL(ha, "failed, ddi_copyin\n");
2047 		cmd->Status = EXT_STATUS_COPY_ERR;
2048 		cmd->ResponseLen = 0;
2049 		return;
2050 	}
2051 
2052 	/*
2053 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2054 	 * request data structure.
2055 	 */
2056 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2057 		scsi_req.lun = sp_req->TargetAddr.Lun;
2058 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2059 		scsi_req.cdbp = &sp_req->Cdb[0];
2060 		scsi_req.cdb_len = sp_req->CdbLength;
2061 		scsi_req.direction = sp_req->Direction;
2062 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2063 		scsi_req.u_sense = &usp_req->SenseData[0];
2064 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2065 
2066 		qlnt = QLNT_PORT;
2067 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2068 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2069 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2070 		tq = ql_find_port(ha, name, qlnt);
2071 	} else {
2072 		/*
2073 		 * Must be FC PASSTHRU, verified above.
2074 		 */
2075 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2076 			qlnt = QLNT_PORT;
2077 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2078 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2079 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2080 			    ha->instance, cmd->SubCode, name[0], name[1],
2081 			    name[2], name[3], name[4], name[5], name[6],
2082 			    name[7]);
2083 			tq = ql_find_port(ha, name, qlnt);
2084 		} else if (fc_req->FCScsiAddr.DestType ==
2085 		    EXT_DEF_DESTTYPE_WWNN) {
2086 			qlnt = QLNT_NODE;
2087 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2088 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2089 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2090 			    ha->instance, cmd->SubCode, name[0], name[1],
2091 			    name[2], name[3], name[4], name[5], name[6],
2092 			    name[7]);
2093 			tq = ql_find_port(ha, name, qlnt);
2094 		} else if (fc_req->FCScsiAddr.DestType ==
2095 		    EXT_DEF_DESTTYPE_PORTID) {
2096 			qlnt = QLNT_PID;
2097 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2098 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2099 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2100 			    name[0], name[1], name[2]);
2101 			tq = ql_find_port(ha, name, qlnt);
2102 		} else {
2103 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2104 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2105 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2106 			cmd->ResponseLen = 0;
2107 			return;
2108 		}
2109 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2110 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2111 		scsi_req.cdbp = &sp_req->Cdb[0];
2112 		scsi_req.cdb_len = sp_req->CdbLength;
2113 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2114 		scsi_req.u_sense = &ufc_req->SenseData[0];
2115 		scsi_req.direction = fc_req->Direction;
2116 	}
2117 
2118 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2119 		EL(ha, "failed, fc_port not found\n");
2120 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2121 		cmd->ResponseLen = 0;
2122 		return;
2123 	}
2124 
2125 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2126 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2127 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2128 		cmd->ResponseLen = 0;
2129 		return;
2130 	}
2131 
2132 	/* Allocate command block. */
2133 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2134 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2135 	    cmd->ResponseLen) {
2136 		pld_size = cmd->ResponseLen;
2137 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2138 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2139 		if (pkt == NULL) {
2140 			EL(ha, "failed, kmem_zalloc\n");
2141 			cmd->Status = EXT_STATUS_NO_MEMORY;
2142 			cmd->ResponseLen = 0;
2143 			return;
2144 		}
2145 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2146 
2147 		/* Get DMA memory for the IOCB */
2148 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2149 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2150 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2151 			    "alloc failed", QL_NAME, ha->instance);
2152 			kmem_free(pkt, pkt_size);
2153 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2154 			cmd->ResponseLen = 0;
2155 			return;
2156 		}
2157 
2158 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2159 			scsi_req.direction = (uint8_t)
2160 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2161 			    CF_RD : CF_DATA_IN | CF_STAG);
2162 		} else {
2163 			scsi_req.direction = (uint8_t)
2164 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2165 			    CF_WR : CF_DATA_OUT | CF_STAG);
2166 			cmd->ResponseLen = 0;
2167 
2168 			/* Get command payload. */
2169 			if (ql_get_buffer_data(
2170 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2171 			    pld, pld_size, mode) != pld_size) {
2172 				EL(ha, "failed, get_buffer_data\n");
2173 				cmd->Status = EXT_STATUS_COPY_ERR;
2174 
2175 				kmem_free(pkt, pkt_size);
2176 				ql_free_dma_resource(ha, dma_mem);
2177 				kmem_free(dma_mem, sizeof (dma_mem_t));
2178 				return;
2179 			}
2180 
2181 			/* Copy out going data to DMA buffer. */
2182 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2183 			    (uint8_t *)dma_mem->bp, pld_size,
2184 			    DDI_DEV_AUTOINCR);
2185 
2186 			/* Sync DMA buffer. */
2187 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2188 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2189 		}
2190 	} else {
2191 		scsi_req.direction = (uint8_t)
2192 		    (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2193 		cmd->ResponseLen = 0;
2194 
2195 		pkt_size = sizeof (ql_mbx_iocb_t);
2196 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2197 		if (pkt == NULL) {
2198 			EL(ha, "failed, kmem_zalloc-2\n");
2199 			cmd->Status = EXT_STATUS_NO_MEMORY;
2200 			return;
2201 		}
2202 		pld = NULL;
2203 		pld_size = 0;
2204 	}
2205 
2206 	/* retries = ha->port_down_retry_count; */
2207 	retries = 1;
2208 	cmd->Status = EXT_STATUS_OK;
2209 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2210 
2211 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2212 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2213 
2214 	do {
2215 		if (DRIVER_SUSPENDED(ha)) {
2216 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2217 			break;
2218 		}
2219 
2220 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2221 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2222 			pkt->cmd24.entry_count = 1;
2223 
2224 			/* Set LUN number */
2225 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2226 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2227 
2228 			/* Set N_port handle */
2229 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2230 
2231 			/* Set VP Index */
2232 			pkt->cmd24.vp_index = ha->vp_index;
2233 
2234 			/* Set target ID */
2235 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2236 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2237 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2238 
2239 			/* Set ISP command timeout. */
2240 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2241 
2242 			/* Load SCSI CDB */
2243 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2244 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2245 			    DDI_DEV_AUTOINCR);
2246 			for (cnt = 0; cnt < MAX_CMDSZ;
2247 			    cnt = (uint16_t)(cnt + 4)) {
2248 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2249 				    + cnt, 4);
2250 			}
2251 
2252 			/* Set tag queue control flags */
2253 			pkt->cmd24.task = TA_STAG;
2254 
2255 			if (pld_size) {
2256 				/* Set transfer direction. */
2257 				pkt->cmd24.control_flags = scsi_req.direction;
2258 
2259 				/* Set data segment count. */
2260 				pkt->cmd24.dseg_count = LE_16(1);
2261 
2262 				/* Load total byte count. */
2263 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2264 
2265 				/* Load data descriptor. */
2266 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2267 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2268 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2269 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2270 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2271 			}
2272 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2273 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2274 			pkt->cmd3.entry_count = 1;
2275 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2276 				pkt->cmd3.target_l = LSB(tq->loop_id);
2277 				pkt->cmd3.target_h = MSB(tq->loop_id);
2278 			} else {
2279 				pkt->cmd3.target_h = LSB(tq->loop_id);
2280 			}
2281 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2282 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2283 			pkt->cmd3.control_flags_l = scsi_req.direction;
2284 			pkt->cmd3.timeout = LE_16(15);
2285 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2286 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2287 			}
2288 			if (pld_size) {
2289 				pkt->cmd3.dseg_count = LE_16(1);
2290 				pkt->cmd3.byte_count = LE_32(pld_size);
2291 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2292 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2293 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2294 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2295 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2296 			}
2297 		} else {
2298 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2299 			pkt->cmd.entry_count = 1;
2300 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2301 				pkt->cmd.target_l = LSB(tq->loop_id);
2302 				pkt->cmd.target_h = MSB(tq->loop_id);
2303 			} else {
2304 				pkt->cmd.target_h = LSB(tq->loop_id);
2305 			}
2306 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2307 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2308 			pkt->cmd.control_flags_l = scsi_req.direction;
2309 			pkt->cmd.timeout = LE_16(15);
2310 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2311 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2312 			}
2313 			if (pld_size) {
2314 				pkt->cmd.dseg_count = LE_16(1);
2315 				pkt->cmd.byte_count = LE_32(pld_size);
2316 				pkt->cmd.dseg_0_address = (uint32_t)
2317 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2318 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2319 			}
2320 		}
2321 		/* Go issue command and wait for completion. */
2322 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2323 		QL_DUMP_9(pkt, 8, pkt_size);
2324 
2325 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2326 
2327 		if (pld_size) {
2328 			/* Sync in coming DMA buffer. */
2329 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2330 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2331 			/* Copy in coming DMA data. */
2332 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2333 			    (uint8_t *)dma_mem->bp, pld_size,
2334 			    DDI_DEV_AUTOINCR);
2335 		}
2336 
2337 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2338 			pkt->sts24.entry_status = (uint8_t)
2339 			    (pkt->sts24.entry_status & 0x3c);
2340 		} else {
2341 			pkt->sts.entry_status = (uint8_t)
2342 			    (pkt->sts.entry_status & 0x7e);
2343 		}
2344 
2345 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2346 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2347 			    pkt->sts.entry_status, tq->d_id.b24);
2348 			status = QL_FUNCTION_PARAMETER_ERROR;
2349 		}
2350 
2351 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2352 		    LE_16(pkt->sts24.comp_status) :
2353 		    LE_16(pkt->sts.comp_status));
2354 
2355 		/*
2356 		 * We have verified about all the request that can be so far.
2357 		 * Now we need to start verification of our ability to
2358 		 * actually issue the CDB.
2359 		 */
2360 		if (DRIVER_SUSPENDED(ha)) {
2361 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2362 			break;
2363 		} else if (status == QL_SUCCESS &&
2364 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2365 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2366 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2367 			if (tq->flags & TQF_FABRIC_DEVICE) {
2368 				rval = ql_login_fport(ha, tq, tq->loop_id,
2369 				    LFF_NO_PLOGI, &mr);
2370 				if (rval != QL_SUCCESS) {
2371 					EL(ha, "failed, login_fport=%xh, "
2372 					    "d_id=%xh\n", rval, tq->d_id.b24);
2373 				}
2374 			} else {
2375 				rval = ql_login_lport(ha, tq, tq->loop_id,
2376 				    LLF_NONE);
2377 				if (rval != QL_SUCCESS) {
2378 					EL(ha, "failed, login_lport=%xh, "
2379 					    "d_id=%xh\n", rval, tq->d_id.b24);
2380 				}
2381 			}
2382 		} else {
2383 			break;
2384 		}
2385 
2386 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2387 
2388 	} while (retries--);
2389 
2390 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2391 		/* Cannot issue command now, maybe later */
2392 		EL(ha, "failed, suspended\n");
2393 		kmem_free(pkt, pkt_size);
2394 		ql_free_dma_resource(ha, dma_mem);
2395 		kmem_free(dma_mem, sizeof (dma_mem_t));
2396 		cmd->Status = EXT_STATUS_SUSPENDED;
2397 		cmd->ResponseLen = 0;
2398 		return;
2399 	}
2400 
2401 	if (status != QL_SUCCESS) {
2402 		/* Command error */
2403 		EL(ha, "failed, I/O\n");
2404 		kmem_free(pkt, pkt_size);
2405 		ql_free_dma_resource(ha, dma_mem);
2406 		kmem_free(dma_mem, sizeof (dma_mem_t));
2407 		cmd->Status = EXT_STATUS_ERR;
2408 		cmd->DetailStatus = status;
2409 		cmd->ResponseLen = 0;
2410 		return;
2411 	}
2412 
2413 	/* Setup status. */
2414 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
2415 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2416 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2417 
2418 		/* Setup residuals. */
2419 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2420 
2421 		/* Setup state flags. */
2422 		sts.state_flags_l = pkt->sts24.state_flags_l;
2423 		sts.state_flags_h = pkt->sts24.state_flags_h;
2424 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2425 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2426 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2427 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2428 		} else {
2429 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2430 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2431 			    SF_GOT_STATUS);
2432 		}
2433 		if (scsi_req.direction & CF_WR) {
2434 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2435 			    SF_DATA_OUT);
2436 		} else if (scsi_req.direction & CF_RD) {
2437 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2438 			    SF_DATA_IN);
2439 		}
2440 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2441 
2442 		/* Setup FCP response info. */
2443 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2444 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2445 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2446 		for (cnt = 0; cnt < sts.rsp_info_length;
2447 		    cnt = (uint16_t)(cnt + 4)) {
2448 			ql_chg_endian(sts.rsp_info + cnt, 4);
2449 		}
2450 
2451 		/* Setup sense data. */
2452 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2453 			sts.req_sense_length =
2454 			    LE_32(pkt->sts24.fcp_sense_length);
2455 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2456 			    SF_ARQ_DONE);
2457 		} else {
2458 			sts.req_sense_length = 0;
2459 		}
2460 		sts.req_sense_data =
2461 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2462 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2463 		    (uintptr_t)sts.req_sense_data);
2464 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2465 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2466 		}
2467 	} else {
2468 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2469 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2470 
2471 		/* Setup residuals. */
2472 		sts.residual_length = LE_32(pkt->sts.residual_length);
2473 
2474 		/* Setup state flags. */
2475 		sts.state_flags_l = pkt->sts.state_flags_l;
2476 		sts.state_flags_h = pkt->sts.state_flags_h;
2477 
2478 		/* Setup FCP response info. */
2479 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2480 		    LE_16(pkt->sts.rsp_info_length) : 0;
2481 		sts.rsp_info = &pkt->sts.rsp_info[0];
2482 
2483 		/* Setup sense data. */
2484 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2485 		    LE_16(pkt->sts.req_sense_length) : 0;
2486 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2487 	}
2488 
2489 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2490 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2491 
2492 	switch (sts.comp_status) {
2493 	case CS_INCOMPLETE:
2494 	case CS_ABORTED:
2495 	case CS_DEVICE_UNAVAILABLE:
2496 	case CS_PORT_UNAVAILABLE:
2497 	case CS_PORT_LOGGED_OUT:
2498 	case CS_PORT_CONFIG_CHG:
2499 	case CS_PORT_BUSY:
2500 	case CS_LOOP_DOWN_ABORT:
2501 		cmd->Status = EXT_STATUS_BUSY;
2502 		break;
2503 	case CS_RESET:
2504 	case CS_QUEUE_FULL:
2505 		cmd->Status = EXT_STATUS_ERR;
2506 		break;
2507 	case CS_TIMEOUT:
2508 		cmd->Status = EXT_STATUS_ERR;
2509 		break;
2510 	case CS_DATA_OVERRUN:
2511 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2512 		break;
2513 	case CS_DATA_UNDERRUN:
2514 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2515 		break;
2516 	}
2517 
2518 	/*
2519 	 * If non data transfer commands fix tranfer counts.
2520 	 */
2521 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2522 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2523 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2524 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2525 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2526 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2527 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2528 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2529 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2530 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2531 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2532 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2533 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2534 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2535 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2536 		/*
2537 		 * Non data transfer command, clear sts_entry residual
2538 		 * length.
2539 		 */
2540 		sts.residual_length = 0;
2541 		cmd->ResponseLen = 0;
2542 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2543 			sts.comp_status = CS_COMPLETE;
2544 			cmd->Status = EXT_STATUS_OK;
2545 		}
2546 	} else {
2547 		cmd->ResponseLen = pld_size;
2548 	}
2549 
2550 	/* Correct ISP completion status */
2551 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2552 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2553 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2554 		    ha->instance);
2555 		scsi_req.resid = 0;
2556 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2557 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2558 		    ha->instance);
2559 		scsi_req.resid = sts.residual_length;
2560 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2561 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2562 
2563 			cmd->ResponseLen = (uint32_t)
2564 			    (pld_size - scsi_req.resid);
2565 		} else {
2566 			EL(ha, "failed, Transfer ERROR\n");
2567 			cmd->Status = EXT_STATUS_ERR;
2568 			cmd->ResponseLen = 0;
2569 		}
2570 	} else {
2571 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2572 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2573 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2574 		    sts.scsi_status_l);
2575 
2576 		scsi_req.resid = pld_size;
2577 		/*
2578 		 * Handle residual count on SCSI check
2579 		 * condition.
2580 		 *
2581 		 * - If Residual Under / Over is set, use the
2582 		 *   Residual Transfer Length field in IOCB.
2583 		 * - If Residual Under / Over is not set, and
2584 		 *   Transferred Data bit is set in State Flags
2585 		 *   field of IOCB, report residual value of 0
2586 		 *   (you may want to do this for tape
2587 		 *   Write-type commands only). This takes care
2588 		 *   of logical end of tape problem and does
2589 		 *   not break Unit Attention.
2590 		 * - If Residual Under / Over is not set, and
2591 		 *   Transferred Data bit is not set in State
2592 		 *   Flags, report residual value equal to
2593 		 *   original data transfer length.
2594 		 */
2595 		if (sts.scsi_status_l & STATUS_CHECK) {
2596 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2597 			cmd->DetailStatus = sts.scsi_status_l;
2598 			if (sts.scsi_status_h &
2599 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2600 				scsi_req.resid = sts.residual_length;
2601 			} else if (sts.state_flags_h &
2602 			    STATE_XFERRED_DATA) {
2603 				scsi_req.resid = 0;
2604 			}
2605 		}
2606 	}
2607 
2608 	if (sts.scsi_status_l & STATUS_CHECK &&
2609 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2610 	    sts.req_sense_length) {
2611 		/*
2612 		 * Check condition with vaild sense data flag set and sense
2613 		 * length != 0
2614 		 */
2615 		if (sts.req_sense_length > scsi_req.sense_length) {
2616 			sense_sz = scsi_req.sense_length;
2617 		} else {
2618 			sense_sz = sts.req_sense_length;
2619 		}
2620 
2621 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2622 		    tq->d_id.b24);
2623 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2624 
2625 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2626 		    (size_t)sense_sz, mode) != 0) {
2627 			EL(ha, "failed, request sense ddi_copyout\n");
2628 		}
2629 
2630 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2631 		cmd->DetailStatus = sts.scsi_status_l;
2632 	}
2633 
2634 	/* Copy response payload from DMA buffer to application. */
2635 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2636 	    cmd->ResponseLen != 0) {
2637 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2638 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2639 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2640 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2641 
2642 		/* Send response payload. */
2643 		if (ql_send_buffer_data(pld,
2644 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2645 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2646 			EL(ha, "failed, send_buffer_data\n");
2647 			cmd->Status = EXT_STATUS_COPY_ERR;
2648 			cmd->ResponseLen = 0;
2649 		}
2650 	}
2651 
2652 	if (cmd->Status != EXT_STATUS_OK) {
2653 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2654 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2655 	} else {
2656 		/*EMPTY*/
2657 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2658 		    ha->instance, cmd->ResponseLen);
2659 	}
2660 
2661 	kmem_free(pkt, pkt_size);
2662 	ql_free_dma_resource(ha, dma_mem);
2663 	kmem_free(dma_mem, sizeof (dma_mem_t));
2664 }
2665 
2666 /*
2667  * ql_wwpn_to_scsiaddr
2668  *
2669  * Input:
2670  *	ha:	adapter state pointer.
2671  *	cmd:	EXT_IOCTL cmd struct pointer.
2672  *	mode:	flags.
2673  *
2674  * Context:
2675  *	Kernel context.
2676  */
2677 static void
2678 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2679 {
2680 	int		status;
2681 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2682 	EXT_SCSI_ADDR	*tmp_addr;
2683 	ql_tgt_t	*tq;
2684 
2685 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2686 
2687 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2688 		/* Return error */
2689 		EL(ha, "incorrect RequestLen\n");
2690 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2691 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2692 		return;
2693 	}
2694 
2695 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2696 	    cmd->RequestLen, mode);
2697 
2698 	if (status != 0) {
2699 		cmd->Status = EXT_STATUS_COPY_ERR;
2700 		EL(ha, "failed, ddi_copyin\n");
2701 		return;
2702 	}
2703 
2704 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2705 
2706 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2707 		/* no matching device */
2708 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2709 		EL(ha, "failed, device not found\n");
2710 		return;
2711 	}
2712 
2713 	/* Copy out the IDs found.  For now we can only return target ID. */
2714 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2715 
2716 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2717 
2718 	if (status != 0) {
2719 		cmd->Status = EXT_STATUS_COPY_ERR;
2720 		EL(ha, "failed, ddi_copyout\n");
2721 	} else {
2722 		cmd->Status = EXT_STATUS_OK;
2723 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2724 	}
2725 }
2726 
2727 /*
2728  * ql_host_idx
2729  *	Gets host order index.
2730  *
2731  * Input:
2732  *	ha:	adapter state pointer.
2733  *	cmd:	EXT_IOCTL cmd struct pointer.
2734  *	mode:	flags.
2735  *
2736  * Returns:
2737  *	None, request status indicated in cmd->Status.
2738  *
2739  * Context:
2740  *	Kernel context.
2741  */
2742 static void
2743 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2744 {
2745 	uint16_t	idx;
2746 
2747 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2748 
2749 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2750 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2751 		cmd->DetailStatus = sizeof (uint16_t);
2752 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2753 		cmd->ResponseLen = 0;
2754 		return;
2755 	}
2756 
2757 	idx = (uint16_t)ha->instance;
2758 
2759 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2760 	    sizeof (uint16_t), mode) != 0) {
2761 		cmd->Status = EXT_STATUS_COPY_ERR;
2762 		cmd->ResponseLen = 0;
2763 		EL(ha, "failed, ddi_copyout\n");
2764 	} else {
2765 		cmd->ResponseLen = sizeof (uint16_t);
2766 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2767 	}
2768 }
2769 
2770 /*
2771  * ql_host_drvname
2772  *	Gets host driver name
2773  *
2774  * Input:
2775  *	ha:	adapter state pointer.
2776  *	cmd:	EXT_IOCTL cmd struct pointer.
2777  *	mode:	flags.
2778  *
2779  * Returns:
2780  *	None, request status indicated in cmd->Status.
2781  *
2782  * Context:
2783  *	Kernel context.
2784  */
2785 static void
2786 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2787 {
2788 
2789 	char		drvname[] = QL_NAME;
2790 	uint32_t	qlnamelen;
2791 
2792 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2793 
2794 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2795 
2796 	if (cmd->ResponseLen < qlnamelen) {
2797 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2798 		cmd->DetailStatus = qlnamelen;
2799 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2800 		    cmd->ResponseLen, qlnamelen);
2801 		cmd->ResponseLen = 0;
2802 		return;
2803 	}
2804 
2805 	if (ddi_copyout((void *)&drvname,
2806 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2807 	    qlnamelen, mode) != 0) {
2808 		cmd->Status = EXT_STATUS_COPY_ERR;
2809 		cmd->ResponseLen = 0;
2810 		EL(ha, "failed, ddi_copyout\n");
2811 	} else {
2812 		cmd->ResponseLen = qlnamelen-1;
2813 	}
2814 
2815 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2816 }
2817 
2818 /*
2819  * ql_read_nvram
2820  *	Get NVRAM contents.
2821  *
2822  * Input:
2823  *	ha:	adapter state pointer.
2824  *	cmd:	EXT_IOCTL cmd struct pointer.
2825  *	mode:	flags.
2826  *
2827  * Returns:
2828  *	None, request status indicated in cmd->Status.
2829  *
2830  * Context:
2831  *	Kernel context.
2832  */
2833 static void
2834 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2835 {
2836 
2837 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2838 
2839 	if (cmd->ResponseLen < ha->nvram_cache->size) {
2840 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2841 		cmd->DetailStatus = ha->nvram_cache->size;
2842 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2843 		    cmd->ResponseLen);
2844 		cmd->ResponseLen = 0;
2845 		return;
2846 	}
2847 
2848 	/* Get NVRAM data. */
2849 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2850 	    mode) != 0) {
2851 		cmd->Status = EXT_STATUS_COPY_ERR;
2852 		cmd->ResponseLen = 0;
2853 		EL(ha, "failed, copy error\n");
2854 	} else {
2855 		cmd->ResponseLen = ha->nvram_cache->size;
2856 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2857 	}
2858 }
2859 
2860 /*
2861  * ql_write_nvram
2862  *	Loads NVRAM contents.
2863  *
2864  * Input:
2865  *	ha:	adapter state pointer.
2866  *	cmd:	EXT_IOCTL cmd struct pointer.
2867  *	mode:	flags.
2868  *
2869  * Returns:
2870  *	None, request status indicated in cmd->Status.
2871  *
2872  * Context:
2873  *	Kernel context.
2874  */
2875 static void
2876 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2877 {
2878 
2879 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2880 
2881 	if (cmd->RequestLen < ha->nvram_cache->size) {
2882 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2883 		cmd->DetailStatus = ha->nvram_cache->size;
2884 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2885 		    cmd->RequestLen);
2886 		return;
2887 	}
2888 
2889 	/* Load NVRAM data. */
2890 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2891 	    mode) != 0) {
2892 		cmd->Status = EXT_STATUS_COPY_ERR;
2893 		EL(ha, "failed, copy error\n");
2894 	} else {
2895 		/*EMPTY*/
2896 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2897 	}
2898 }
2899 
2900 /*
2901  * ql_write_vpd
2902  *	Loads VPD contents.
2903  *
2904  * Input:
2905  *	ha:	adapter state pointer.
2906  *	cmd:	EXT_IOCTL cmd struct pointer.
2907  *	mode:	flags.
2908  *
2909  * Returns:
2910  *	None, request status indicated in cmd->Status.
2911  *
2912  * Context:
2913  *	Kernel context.
2914  */
2915 static void
2916 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2917 {
2918 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2919 
2920 	int32_t		rval = 0;
2921 
2922 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2923 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2924 		EL(ha, "failed, invalid request for HBA\n");
2925 		return;
2926 	}
2927 
2928 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2929 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2930 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2931 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2932 		    cmd->RequestLen);
2933 		return;
2934 	}
2935 
2936 	/* Load VPD data. */
2937 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2938 	    mode)) != 0) {
2939 		cmd->Status = EXT_STATUS_COPY_ERR;
2940 		cmd->DetailStatus = rval;
2941 		EL(ha, "failed, errno=%x\n", rval);
2942 	} else {
2943 		/*EMPTY*/
2944 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2945 	}
2946 }
2947 
2948 /*
2949  * ql_read_vpd
2950  *	Dumps VPD contents.
2951  *
2952  * Input:
2953  *	ha:	adapter state pointer.
2954  *	cmd:	EXT_IOCTL cmd struct pointer.
2955  *	mode:	flags.
2956  *
2957  * Returns:
2958  *	None, request status indicated in cmd->Status.
2959  *
2960  * Context:
2961  *	Kernel context.
2962  */
2963 static void
2964 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2965 {
2966 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2967 
2968 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2969 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2970 		EL(ha, "failed, invalid request for HBA\n");
2971 		return;
2972 	}
2973 
2974 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2975 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2976 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2977 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2978 		    cmd->ResponseLen);
2979 		return;
2980 	}
2981 
2982 	/* Dump VPD data. */
2983 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2984 	    mode)) != 0) {
2985 		cmd->Status = EXT_STATUS_COPY_ERR;
2986 		EL(ha, "failed,\n");
2987 	} else {
2988 		/*EMPTY*/
2989 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2990 	}
2991 }
2992 
2993 /*
2994  * ql_get_fcache
2995  *	Dumps flash cache contents.
2996  *
2997  * Input:
2998  *	ha:	adapter state pointer.
2999  *	cmd:	EXT_IOCTL cmd struct pointer.
3000  *	mode:	flags.
3001  *
3002  * Returns:
3003  *	None, request status indicated in cmd->Status.
3004  *
3005  * Context:
3006  *	Kernel context.
3007  */
3008 static void
3009 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3010 {
3011 	uint32_t	bsize, boff, types, cpsize, hsize;
3012 	ql_fcache_t	*fptr;
3013 
3014 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3015 
3016 	CACHE_LOCK(ha);
3017 
3018 	if (ha->fcache == NULL) {
3019 		CACHE_UNLOCK(ha);
3020 		cmd->Status = EXT_STATUS_ERR;
3021 		EL(ha, "failed, adapter fcache not setup\n");
3022 		return;
3023 	}
3024 
3025 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
3026 		bsize = 100;
3027 	} else {
3028 		bsize = 400;
3029 	}
3030 
3031 	if (cmd->ResponseLen < bsize) {
3032 		CACHE_UNLOCK(ha);
3033 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3034 		cmd->DetailStatus = bsize;
3035 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3036 		    bsize, cmd->ResponseLen);
3037 		return;
3038 	}
3039 
3040 	boff = 0;
3041 	bsize = 0;
3042 	fptr = ha->fcache;
3043 
3044 	/*
3045 	 * For backwards compatibility, get one of each image type
3046 	 */
3047 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
3048 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3049 		/* Get the next image */
3050 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3051 
3052 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3053 
3054 			if (ddi_copyout(fptr->buf,
3055 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3056 			    cpsize, mode) != 0) {
3057 				CACHE_UNLOCK(ha);
3058 				EL(ha, "ddicopy failed, done\n");
3059 				cmd->Status = EXT_STATUS_COPY_ERR;
3060 				cmd->DetailStatus = 0;
3061 				return;
3062 			}
3063 			boff += 100;
3064 			bsize += cpsize;
3065 			types &= ~(fptr->type);
3066 		}
3067 	}
3068 
3069 	/*
3070 	 * Get the firmware image -- it needs to be last in the
3071 	 * buffer at offset 300 for backwards compatibility. Also for
3072 	 * backwards compatibility, the pci header is stripped off.
3073 	 */
3074 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3075 
3076 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3077 		if (hsize > fptr->buflen) {
3078 			CACHE_UNLOCK(ha);
3079 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3080 			    hsize, fptr->buflen);
3081 			cmd->Status = EXT_STATUS_COPY_ERR;
3082 			cmd->DetailStatus = 0;
3083 			return;
3084 		}
3085 
3086 		cpsize = ((fptr->buflen - hsize) < 100 ?
3087 		    fptr->buflen - hsize : 100);
3088 
3089 		if (ddi_copyout(fptr->buf+hsize,
3090 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3091 		    cpsize, mode) != 0) {
3092 			CACHE_UNLOCK(ha);
3093 			EL(ha, "fw ddicopy failed, done\n");
3094 			cmd->Status = EXT_STATUS_COPY_ERR;
3095 			cmd->DetailStatus = 0;
3096 			return;
3097 		}
3098 		bsize += 100;
3099 	}
3100 
3101 	CACHE_UNLOCK(ha);
3102 	cmd->Status = EXT_STATUS_OK;
3103 	cmd->DetailStatus = bsize;
3104 
3105 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3106 }
3107 
3108 /*
3109  * ql_get_fcache_ex
3110  *	Dumps flash cache contents.
3111  *
3112  * Input:
3113  *	ha:	adapter state pointer.
3114  *	cmd:	EXT_IOCTL cmd struct pointer.
3115  *	mode:	flags.
3116  *
3117  * Returns:
3118  *	None, request status indicated in cmd->Status.
3119  *
3120  * Context:
3121  *	Kernel context.
3122  */
3123 static void
3124 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3125 {
3126 	uint32_t	bsize = 0;
3127 	uint32_t	boff = 0;
3128 	ql_fcache_t	*fptr;
3129 
3130 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3131 
3132 	CACHE_LOCK(ha);
3133 	if (ha->fcache == NULL) {
3134 		CACHE_UNLOCK(ha);
3135 		cmd->Status = EXT_STATUS_ERR;
3136 		EL(ha, "failed, adapter fcache not setup\n");
3137 		return;
3138 	}
3139 
3140 	/* Make sure user passed enough buffer space */
3141 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3142 		bsize += FBUFSIZE;
3143 	}
3144 
3145 	if (cmd->ResponseLen < bsize) {
3146 		CACHE_UNLOCK(ha);
3147 		if (cmd->ResponseLen != 0) {
3148 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3149 			    bsize, cmd->ResponseLen);
3150 		}
3151 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3152 		cmd->DetailStatus = bsize;
3153 		return;
3154 	}
3155 
3156 	boff = 0;
3157 	fptr = ha->fcache;
3158 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3159 		/* Get the next image */
3160 		if (ddi_copyout(fptr->buf,
3161 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3162 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3163 		    mode) != 0) {
3164 			CACHE_UNLOCK(ha);
3165 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3166 			cmd->Status = EXT_STATUS_COPY_ERR;
3167 			cmd->DetailStatus = 0;
3168 			return;
3169 		}
3170 		boff += FBUFSIZE;
3171 		fptr = fptr->next;
3172 	}
3173 
3174 	CACHE_UNLOCK(ha);
3175 	cmd->Status = EXT_STATUS_OK;
3176 	cmd->DetailStatus = bsize;
3177 
3178 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3179 }
3180 
3181 /*
3182  * ql_read_flash
3183  *	Get flash contents.
3184  *
3185  * Input:
3186  *	ha:	adapter state pointer.
3187  *	cmd:	EXT_IOCTL cmd struct pointer.
3188  *	mode:	flags.
3189  *
3190  * Returns:
3191  *	None, request status indicated in cmd->Status.
3192  *
3193  * Context:
3194  *	Kernel context.
3195  */
3196 static void
3197 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3198 {
3199 	ql_xioctl_t	*xp = ha->xioctl;
3200 
3201 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3202 
3203 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3204 		EL(ha, "ql_stall_driver failed\n");
3205 		cmd->Status = EXT_STATUS_BUSY;
3206 		cmd->DetailStatus = xp->fdesc.flash_size;
3207 		cmd->ResponseLen = 0;
3208 		return;
3209 	}
3210 
3211 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3212 		cmd->Status = EXT_STATUS_ERR;
3213 		cmd->DetailStatus = xp->fdesc.flash_size;
3214 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3215 		    cmd->ResponseLen, xp->fdesc.flash_size);
3216 		cmd->ResponseLen = 0;
3217 	} else {
3218 		/* adjust read size to flash size */
3219 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3220 			EL(ha, "adjusting req=%xh, max=%xh\n",
3221 			    cmd->ResponseLen, xp->fdesc.flash_size);
3222 			cmd->ResponseLen = xp->fdesc.flash_size;
3223 		}
3224 
3225 		/* Get flash data. */
3226 		if (ql_flash_fcode_dump(ha,
3227 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3228 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3229 			cmd->Status = EXT_STATUS_COPY_ERR;
3230 			cmd->ResponseLen = 0;
3231 			EL(ha, "failed,\n");
3232 		}
3233 	}
3234 
3235 	/* Resume I/O */
3236 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
3237 		ql_restart_driver(ha);
3238 	} else {
3239 		EL(ha, "isp_abort_needed for restart\n");
3240 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3241 		    DRIVER_STALL);
3242 	}
3243 
3244 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3245 }
3246 
3247 /*
3248  * ql_write_flash
3249  *	Loads flash contents.
3250  *
3251  * Input:
3252  *	ha:	adapter state pointer.
3253  *	cmd:	EXT_IOCTL cmd struct pointer.
3254  *	mode:	flags.
3255  *
3256  * Returns:
3257  *	None, request status indicated in cmd->Status.
3258  *
3259  * Context:
3260  *	Kernel context.
3261  */
3262 static void
3263 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3264 {
3265 	ql_xioctl_t	*xp = ha->xioctl;
3266 
3267 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3268 
3269 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3270 		EL(ha, "ql_stall_driver failed\n");
3271 		cmd->Status = EXT_STATUS_BUSY;
3272 		cmd->DetailStatus = xp->fdesc.flash_size;
3273 		cmd->ResponseLen = 0;
3274 		return;
3275 	}
3276 
3277 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3278 		cmd->Status = EXT_STATUS_ERR;
3279 		cmd->DetailStatus = xp->fdesc.flash_size;
3280 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3281 		    cmd->RequestLen, xp->fdesc.flash_size);
3282 		cmd->ResponseLen = 0;
3283 	} else {
3284 		/* Load flash data. */
3285 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3286 			cmd->Status = EXT_STATUS_ERR;
3287 			cmd->DetailStatus =  xp->fdesc.flash_size;
3288 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3289 			    cmd->RequestLen, xp->fdesc.flash_size);
3290 		} else if (ql_flash_fcode_load(ha,
3291 		    (void *)(uintptr_t)(cmd->RequestAdr),
3292 		    (size_t)(cmd->RequestLen), mode) != 0) {
3293 			cmd->Status = EXT_STATUS_COPY_ERR;
3294 			EL(ha, "failed,\n");
3295 		}
3296 	}
3297 
3298 	/* Resume I/O */
3299 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
3300 		ql_restart_driver(ha);
3301 	} else {
3302 		EL(ha, "isp_abort_needed for restart\n");
3303 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3304 		    DRIVER_STALL);
3305 	}
3306 
3307 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3308 }
3309 
3310 /*
3311  * ql_diagnostic_loopback
3312  *	Performs EXT_CC_LOOPBACK Command
3313  *
3314  * Input:
3315  *	ha:	adapter state pointer.
3316  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3317  *	mode:	flags.
3318  *
3319  * Returns:
3320  *	None, request status indicated in cmd->Status.
3321  *
3322  * Context:
3323  *	Kernel context.
3324  */
3325 static void
3326 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3327 {
3328 	EXT_LOOPBACK_REQ	plbreq;
3329 	EXT_LOOPBACK_RSP	plbrsp;
3330 	ql_mbx_data_t		mr;
3331 	uint32_t		rval;
3332 	caddr_t			bp;
3333 	uint16_t		opt;
3334 
3335 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3336 
3337 	/* Get loop back request. */
3338 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3339 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3340 		EL(ha, "failed, ddi_copyin\n");
3341 		cmd->Status = EXT_STATUS_COPY_ERR;
3342 		cmd->ResponseLen = 0;
3343 		return;
3344 	}
3345 
3346 	opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);
3347 
3348 	/* Check transfer length fits in buffer. */
3349 	if (plbreq.BufferLength < plbreq.TransferCount &&
3350 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3351 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3352 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3353 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3354 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3355 		cmd->ResponseLen = 0;
3356 		return;
3357 	}
3358 
3359 	/* Allocate command memory. */
3360 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3361 	if (bp == NULL) {
3362 		EL(ha, "failed, kmem_zalloc\n");
3363 		cmd->Status = EXT_STATUS_NO_MEMORY;
3364 		cmd->ResponseLen = 0;
3365 		return;
3366 	}
3367 
3368 	/* Get loopback data. */
3369 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3370 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3371 		EL(ha, "failed, ddi_copyin-2\n");
3372 		kmem_free(bp, plbreq.TransferCount);
3373 		cmd->Status = EXT_STATUS_COPY_ERR;
3374 		cmd->ResponseLen = 0;
3375 		return;
3376 	}
3377 
3378 	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
3379 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3380 		EL(ha, "failed, LOOP_NOT_READY\n");
3381 		kmem_free(bp, plbreq.TransferCount);
3382 		cmd->Status = EXT_STATUS_BUSY;
3383 		cmd->ResponseLen = 0;
3384 		return;
3385 	}
3386 
3387 	/* Shutdown IP. */
3388 	if (ha->flags & IP_INITIALIZED) {
3389 		(void) ql_shutdown_ip(ha);
3390 	}
3391 
3392 	/* determine topology so we can send the loopback or the echo */
3393 	/* Echo is supported on 2300's only and above */
3394 
3395 	if (CFG_IST(ha, CFG_CTRL_8081)) {
3396 		if (!(ha->task_daemon_flags & LOOP_DOWN) && opt ==
3397 		    MBC_LOOPBACK_POINT_EXTERNAL) {
3398 			if (plbreq.TransferCount > 252) {
3399 				EL(ha, "transfer count (%d) > 252\n",
3400 				    plbreq.TransferCount);
3401 				kmem_free(bp, plbreq.TransferCount);
3402 				cmd->Status = EXT_STATUS_INVALID_PARAM;
3403 				cmd->ResponseLen = 0;
3404 				return;
3405 			}
3406 			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3407 			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
3408 			    MBC_ECHO_ELS, &mr);
3409 		} else {
3410 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3411 				(void) ql_set_loop_point(ha, opt);
3412 			}
3413 			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3414 			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
3415 			    opt, plbreq.IterationCount, &mr);
3416 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3417 				(void) ql_set_loop_point(ha, 0);
3418 			}
3419 		}
3420 	} else {
3421 		if (!(ha->task_daemon_flags & LOOP_DOWN) &&
3422 		    (ha->topology & QL_F_PORT) &&
3423 		    ha->device_id >= 0x2300) {
3424 			QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using "
3425 			    "echo\n", ha->instance);
3426 			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3427 			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
3428 			    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ?
3429 			    MBC_ECHO_ELS : MBC_ECHO_64BIT), &mr);
3430 		} else {
3431 			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3432 			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
3433 			    opt, plbreq.IterationCount, &mr);
3434 		}
3435 	}
3436 
3437 	ql_restart_driver(ha);
3438 
3439 	/* Restart IP if it was shutdown. */
3440 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3441 		(void) ql_initialize_ip(ha);
3442 		ql_isp_rcvbuf(ha);
3443 	}
3444 
3445 	if (rval != QL_SUCCESS) {
3446 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3447 		kmem_free(bp, plbreq.TransferCount);
3448 		cmd->Status = EXT_STATUS_MAILBOX;
3449 		cmd->DetailStatus = rval;
3450 		cmd->ResponseLen = 0;
3451 		return;
3452 	}
3453 
3454 	/* Return loopback data. */
3455 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3456 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3457 		EL(ha, "failed, ddi_copyout\n");
3458 		kmem_free(bp, plbreq.TransferCount);
3459 		cmd->Status = EXT_STATUS_COPY_ERR;
3460 		cmd->ResponseLen = 0;
3461 		return;
3462 	}
3463 	kmem_free(bp, plbreq.TransferCount);
3464 
3465 	/* Return loopback results. */
3466 	plbrsp.BufferAddress = plbreq.BufferAddress;
3467 	plbrsp.BufferLength = plbreq.TransferCount;
3468 	plbrsp.CompletionStatus = mr.mb[0];
3469 
3470 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3471 		plbrsp.CrcErrorCount = 0;
3472 		plbrsp.DisparityErrorCount = 0;
3473 		plbrsp.FrameLengthErrorCount = 0;
3474 		plbrsp.IterationCountLastError = 0;
3475 	} else {
3476 		plbrsp.CrcErrorCount = mr.mb[1];
3477 		plbrsp.DisparityErrorCount = mr.mb[2];
3478 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3479 		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
3480 	}
3481 
3482 	rval = ddi_copyout((void *)&plbrsp,
3483 	    (void *)(uintptr_t)cmd->ResponseAdr,
3484 	    sizeof (EXT_LOOPBACK_RSP), mode);
3485 	if (rval != 0) {
3486 		EL(ha, "failed, ddi_copyout-2\n");
3487 		cmd->Status = EXT_STATUS_COPY_ERR;
3488 		cmd->ResponseLen = 0;
3489 		return;
3490 	}
3491 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3492 
3493 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3494 }
3495 
3496 /*
3497  * ql_set_loop_point
3498  *	Setup loop point for port configuration.
3499  *
3500  * Input:
3501  *	ha:	adapter state structure.
3502  *	opt:	loop point option.
3503  *
3504  * Returns:
3505  *	ql local function return status code.
3506  *
3507  * Context:
3508  *	Kernel context.
3509  */
3510 static int
3511 ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt)
3512 {
3513 	ql_mbx_data_t	mr;
3514 	int		rval;
3515 	uint32_t	timer;
3516 
3517 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3518 
3519 	/*
3520 	 * We get the current port config, modify the loopback field and
3521 	 * write it back out.
3522 	 */
3523 	if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) {
3524 		EL(ha, "get_port_config status=%xh\n", rval);
3525 		return (rval);
3526 	}
3527 	/*
3528 	 * Set the loopback mode field while maintaining the others.
3529 	 * Currently only internal or none are supported.
3530 	 */
3531 	mr.mb[1] = (uint16_t)(mr.mb[1] &~LOOPBACK_MODE_FIELD_MASK);
3532 	if (opt == MBC_LOOPBACK_POINT_INTERNAL) {
3533 		mr.mb[1] = (uint16_t)(mr.mb[1] |
3534 		    LOOPBACK_MODE(LOOPBACK_MODE_INTERNAL));
3535 	}
3536 	/*
3537 	 * Changing the port configuration will cause the port state to cycle
3538 	 * down and back up. The indication that this has happened is that
3539 	 * the point to point flag gets set.
3540 	 */
3541 	ADAPTER_STATE_LOCK(ha);
3542 	ha->flags &= ~POINT_TO_POINT;
3543 	ADAPTER_STATE_UNLOCK(ha);
3544 	if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) {
3545 		EL(ha, "set_port_config status=%xh\n", rval);
3546 	}
3547 
3548 	/* wait for a while */
3549 	for (timer = opt ? 10 : 0; timer; timer--) {
3550 		if (ha->flags & POINT_TO_POINT) {
3551 			break;
3552 		}
3553 		/* Delay for 1000000 usec (1 second). */
3554 		ql_delay(ha, 1000000);
3555 	}
3556 
3557 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3558 
3559 	return (rval);
3560 }
3561 
3562 /*
3563  * ql_send_els_rnid
3564  *	IOCTL for extended link service RNID command.
3565  *
3566  * Input:
3567  *	ha:	adapter state pointer.
3568  *	cmd:	User space CT arguments pointer.
3569  *	mode:	flags.
3570  *
3571  * Returns:
3572  *	None, request status indicated in cmd->Status.
3573  *
3574  * Context:
3575  *	Kernel context.
3576  */
3577 static void
3578 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3579 {
3580 	EXT_RNID_REQ	tmp_rnid;
3581 	port_id_t	tmp_fcid;
3582 	caddr_t		tmp_buf, bptr;
3583 	uint32_t	copy_len;
3584 	ql_tgt_t	*tq;
3585 	EXT_RNID_DATA	rnid_data;
3586 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3587 	int		rval = 0;
3588 	uint32_t	local_hba = 0;
3589 
3590 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3591 
3592 	if (DRIVER_SUSPENDED(ha)) {
3593 		EL(ha, "failed, LOOP_NOT_READY\n");
3594 		cmd->Status = EXT_STATUS_BUSY;
3595 		cmd->ResponseLen = 0;
3596 		return;
3597 	}
3598 
3599 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3600 		/* parameter error */
3601 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3602 		    cmd->RequestLen);
3603 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3604 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3605 		cmd->ResponseLen = 0;
3606 		return;
3607 	}
3608 
3609 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3610 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3611 		EL(ha, "failed, ddi_copyin\n");
3612 		cmd->Status = EXT_STATUS_COPY_ERR;
3613 		cmd->ResponseLen = 0;
3614 		return;
3615 	}
3616 
3617 	/* Find loop ID of the device */
3618 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3619 		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3620 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3621 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3622 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3623 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3624 			local_hba = 1;
3625 		} else {
3626 			tq = ql_find_port(ha,
3627 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3628 		}
3629 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3630 		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3631 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3632 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3633 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3634 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3635 			local_hba = 1;
3636 		} else {
3637 			tq = ql_find_port(ha,
3638 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3639 		}
3640 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3641 		/*
3642 		 * Copy caller's d_id to tmp space.
3643 		 */
3644 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3645 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3646 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3647 
3648 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3649 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3650 			local_hba = 1;
3651 		} else {
3652 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3653 			    QLNT_PID);
3654 		}
3655 	}
3656 
3657 	/* Allocate memory for command. */
3658 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3659 	if (tmp_buf == NULL) {
3660 		EL(ha, "failed, kmem_zalloc\n");
3661 		cmd->Status = EXT_STATUS_NO_MEMORY;
3662 		cmd->ResponseLen = 0;
3663 		return;
3664 	}
3665 
3666 	if (local_hba) {
3667 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3668 		if (rval != QL_SUCCESS) {
3669 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3670 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3671 			cmd->Status = EXT_STATUS_ERR;
3672 			cmd->ResponseLen = 0;
3673 			return;
3674 		}
3675 
3676 		/* Save gotten RNID data. */
3677 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3678 
3679 		/* Now build the Send RNID response */
3680 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3681 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3682 		tmp_buf[2] = 0;
3683 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3684 
3685 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
3686 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3687 			    EXT_DEF_WWN_NAME_SIZE);
3688 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3689 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3690 			    EXT_DEF_WWN_NAME_SIZE);
3691 		} else {
3692 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3693 			    EXT_DEF_WWN_NAME_SIZE);
3694 			bcopy(ha->init_ctrl_blk.cb.node_name,
3695 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3696 			    EXT_DEF_WWN_NAME_SIZE);
3697 		}
3698 
3699 		bcopy((uint8_t *)&rnid_data,
3700 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3701 		    sizeof (EXT_RNID_DATA));
3702 	} else {
3703 		if (tq == NULL) {
3704 			/* no matching device */
3705 			EL(ha, "failed, device not found\n");
3706 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3707 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3708 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3709 			cmd->ResponseLen = 0;
3710 			return;
3711 		}
3712 
3713 		/* Send command */
3714 		rval = ql_send_rnid_els(ha, tq->loop_id,
3715 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3716 		if (rval != QL_SUCCESS) {
3717 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3718 			    rval, tq->loop_id);
3719 			while (LOOP_NOT_READY(ha)) {
3720 				ql_delay(ha, 100000);
3721 				if (loop_ready_wait-- == 0) {
3722 					EL(ha, "failed, loop not ready\n");
3723 					cmd->Status = EXT_STATUS_ERR;
3724 					cmd->ResponseLen = 0;
3725 				}
3726 			}
3727 			rval = ql_send_rnid_els(ha, tq->loop_id,
3728 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3729 			    tmp_buf);
3730 			if (rval != QL_SUCCESS) {
3731 				/* error */
3732 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3733 				    rval, tq->loop_id);
3734 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3735 				cmd->Status = EXT_STATUS_ERR;
3736 				cmd->ResponseLen = 0;
3737 				return;
3738 			}
3739 		}
3740 	}
3741 
3742 	/* Copy the response */
3743 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3744 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3745 
3746 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3747 	    copy_len, mode) != copy_len) {
3748 		cmd->Status = EXT_STATUS_COPY_ERR;
3749 		EL(ha, "failed, ddi_copyout\n");
3750 	} else {
3751 		cmd->ResponseLen = copy_len;
3752 		if (copy_len < SEND_RNID_RSP_SIZE) {
3753 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3754 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3755 
3756 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3757 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3758 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3759 		} else {
3760 			cmd->Status = EXT_STATUS_OK;
3761 			QL_PRINT_9(CE_CONT, "(%d): done\n",
3762 			    ha->instance);
3763 		}
3764 	}
3765 
3766 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3767 }
3768 
3769 /*
3770  * ql_set_host_data
3771  *	Process IOCTL subcommand to set host/adapter related data.
3772  *
3773  * Input:
3774  *	ha:	adapter state pointer.
3775  *	cmd:	User space CT arguments pointer.
3776  *	mode:	flags.
3777  *
3778  * Returns:
3779  *	None, request status indicated in cmd->Status.
3780  *
3781  * Context:
3782  *	Kernel context.
3783  */
3784 static void
3785 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3786 {
3787 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3788 	    cmd->SubCode);
3789 
3790 	/*
3791 	 * case off on command subcode
3792 	 */
3793 	switch (cmd->SubCode) {
3794 	case EXT_SC_SET_RNID:
3795 		ql_set_rnid_parameters(ha, cmd, mode);
3796 		break;
3797 	case EXT_SC_RST_STATISTICS:
3798 		(void) ql_reset_statistics(ha, cmd);
3799 		break;
3800 	case EXT_SC_SET_BEACON_STATE:
3801 		ql_set_led_state(ha, cmd, mode);
3802 		break;
3803 	case EXT_SC_SET_PARMS:
3804 	case EXT_SC_SET_BUS_MODE:
3805 	case EXT_SC_SET_DR_DUMP_BUF:
3806 	case EXT_SC_SET_RISC_CODE:
3807 	case EXT_SC_SET_FLASH_RAM:
3808 	case EXT_SC_SET_LUN_BITMASK:
3809 	case EXT_SC_SET_RETRY_CNT:
3810 	case EXT_SC_SET_RTIN:
3811 	case EXT_SC_SET_FC_LUN_BITMASK:
3812 	case EXT_SC_ADD_TARGET_DEVICE:
3813 	case EXT_SC_SWAP_TARGET_DEVICE:
3814 	case EXT_SC_SET_SEL_TIMEOUT:
3815 	default:
3816 		/* function not supported. */
3817 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3818 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3819 		break;
3820 	}
3821 
3822 	if (cmd->Status != EXT_STATUS_OK) {
3823 		EL(ha, "failed, Status=%d\n", cmd->Status);
3824 	} else {
3825 		/*EMPTY*/
3826 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3827 	}
3828 }
3829 
3830 /*
3831  * ql_get_host_data
3832  *	Performs EXT_CC_GET_DATA subcommands.
3833  *
3834  * Input:
3835  *	ha:	adapter state pointer.
3836  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3837  *	mode:	flags.
3838  *
3839  * Returns:
3840  *	None, request status indicated in cmd->Status.
3841  *
3842  * Context:
3843  *	Kernel context.
3844  */
3845 static void
3846 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3847 {
3848 	int	out_size = 0;
3849 
3850 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3851 	    cmd->SubCode);
3852 
3853 	/* case off on command subcode */
3854 	switch (cmd->SubCode) {
3855 	case EXT_SC_GET_STATISTICS:
3856 		out_size = sizeof (EXT_HBA_PORT_STAT);
3857 		break;
3858 	case EXT_SC_GET_FC_STATISTICS:
3859 		out_size = sizeof (EXT_HBA_PORT_STAT);
3860 		break;
3861 	case EXT_SC_GET_PORT_SUMMARY:
3862 		out_size = sizeof (EXT_DEVICEDATA);
3863 		break;
3864 	case EXT_SC_GET_RNID:
3865 		out_size = sizeof (EXT_RNID_DATA);
3866 		break;
3867 	case EXT_SC_GET_TARGET_ID:
3868 		out_size = sizeof (EXT_DEST_ADDR);
3869 		break;
3870 	case EXT_SC_GET_BEACON_STATE:
3871 		out_size = sizeof (EXT_BEACON_CONTROL);
3872 		break;
3873 	case EXT_SC_GET_FC4_STATISTICS:
3874 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3875 		break;
3876 	case EXT_SC_GET_DCBX_PARAM:
3877 		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3878 		break;
3879 	case EXT_SC_GET_RESOURCE_CNTS:
3880 		out_size = sizeof (EXT_RESOURCE_CNTS);
3881 		break;
3882 	case EXT_SC_GET_FCF_LIST:
3883 		out_size = sizeof (EXT_FCF_LIST);
3884 		break;
3885 	case EXT_SC_GET_SCSI_ADDR:
3886 	case EXT_SC_GET_ERR_DETECTIONS:
3887 	case EXT_SC_GET_BUS_MODE:
3888 	case EXT_SC_GET_DR_DUMP_BUF:
3889 	case EXT_SC_GET_RISC_CODE:
3890 	case EXT_SC_GET_FLASH_RAM:
3891 	case EXT_SC_GET_LINK_STATUS:
3892 	case EXT_SC_GET_LOOP_ID:
3893 	case EXT_SC_GET_LUN_BITMASK:
3894 	case EXT_SC_GET_PORT_DATABASE:
3895 	case EXT_SC_GET_PORT_DATABASE_MEM:
3896 	case EXT_SC_GET_POSITION_MAP:
3897 	case EXT_SC_GET_RETRY_CNT:
3898 	case EXT_SC_GET_RTIN:
3899 	case EXT_SC_GET_FC_LUN_BITMASK:
3900 	case EXT_SC_GET_SEL_TIMEOUT:
3901 	default:
3902 		/* function not supported. */
3903 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3904 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3905 		cmd->ResponseLen = 0;
3906 		return;
3907 	}
3908 
3909 	if (cmd->ResponseLen < out_size) {
3910 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3911 		cmd->DetailStatus = out_size;
3912 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3913 		    cmd->ResponseLen, out_size);
3914 		cmd->ResponseLen = 0;
3915 		return;
3916 	}
3917 
3918 	switch (cmd->SubCode) {
3919 	case EXT_SC_GET_RNID:
3920 		ql_get_rnid_parameters(ha, cmd, mode);
3921 		break;
3922 	case EXT_SC_GET_STATISTICS:
3923 		ql_get_statistics(ha, cmd, mode);
3924 		break;
3925 	case EXT_SC_GET_FC_STATISTICS:
3926 		ql_get_statistics_fc(ha, cmd, mode);
3927 		break;
3928 	case EXT_SC_GET_FC4_STATISTICS:
3929 		ql_get_statistics_fc4(ha, cmd, mode);
3930 		break;
3931 	case EXT_SC_GET_PORT_SUMMARY:
3932 		ql_get_port_summary(ha, cmd, mode);
3933 		break;
3934 	case EXT_SC_GET_TARGET_ID:
3935 		ql_get_target_id(ha, cmd, mode);
3936 		break;
3937 	case EXT_SC_GET_BEACON_STATE:
3938 		ql_get_led_state(ha, cmd, mode);
3939 		break;
3940 	case EXT_SC_GET_DCBX_PARAM:
3941 		ql_get_dcbx_parameters(ha, cmd, mode);
3942 		break;
3943 	case EXT_SC_GET_FCF_LIST:
3944 		ql_get_fcf_list(ha, cmd, mode);
3945 		break;
3946 	case EXT_SC_GET_RESOURCE_CNTS:
3947 		ql_get_resource_counts(ha, cmd, mode);
3948 		break;
3949 	}
3950 
3951 	if (cmd->Status != EXT_STATUS_OK) {
3952 		EL(ha, "failed, Status=%d\n", cmd->Status);
3953 	} else {
3954 		/*EMPTY*/
3955 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3956 	}
3957 }
3958 
3959 /* ******************************************************************** */
3960 /*			Helper Functions				*/
3961 /* ******************************************************************** */
3962 
3963 /*
3964  * ql_lun_count
3965  *	Get numbers of LUNS on target.
3966  *
3967  * Input:
3968  *	ha:	adapter state pointer.
3969  *	q:	device queue pointer.
3970  *
3971  * Returns:
3972  *	Number of LUNs.
3973  *
3974  * Context:
3975  *	Kernel context.
3976  */
3977 static int
3978 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3979 {
3980 	int	cnt;
3981 
3982 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3983 
3984 	/* Bypass LUNs that failed. */
3985 	cnt = ql_report_lun(ha, tq);
3986 	if (cnt == 0) {
3987 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3988 	}
3989 
3990 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3991 
3992 	return (cnt);
3993 }
3994 
3995 /*
3996  * ql_report_lun
3997  *	Get numbers of LUNS using report LUN command.
3998  *
3999  * Input:
4000  *	ha:	adapter state pointer.
4001  *	q:	target queue pointer.
4002  *
4003  * Returns:
4004  *	Number of LUNs.
4005  *
4006  * Context:
4007  *	Kernel context.
4008  */
4009 static int
4010 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
4011 {
4012 	int			rval;
4013 	uint8_t			retries;
4014 	ql_mbx_iocb_t		*pkt;
4015 	ql_rpt_lun_lst_t	*rpt;
4016 	dma_mem_t		dma_mem;
4017 	uint32_t		pkt_size, cnt;
4018 	uint16_t		comp_status;
4019 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
4020 
4021 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4022 
4023 	if (DRIVER_SUSPENDED(ha)) {
4024 		EL(ha, "failed, LOOP_NOT_READY\n");
4025 		return (0);
4026 	}
4027 
4028 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
4029 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4030 	if (pkt == NULL) {
4031 		EL(ha, "failed, kmem_zalloc\n");
4032 		return (0);
4033 	}
4034 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4035 
4036 	/* Get DMA memory for the IOCB */
4037 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
4038 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4039 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4040 		    "alloc failed", QL_NAME, ha->instance);
4041 		kmem_free(pkt, pkt_size);
4042 		return (0);
4043 	}
4044 
4045 	for (retries = 0; retries < 4; retries++) {
4046 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4047 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4048 			pkt->cmd24.entry_count = 1;
4049 
4050 			/* Set N_port handle */
4051 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4052 
4053 			/* Set target ID */
4054 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4055 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4056 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4057 
4058 			/* Set Virtual Port ID */
4059 			pkt->cmd24.vp_index = ha->vp_index;
4060 
4061 			/* Set ISP command timeout. */
4062 			pkt->cmd24.timeout = LE_16(15);
4063 
4064 			/* Load SCSI CDB */
4065 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
4066 			pkt->cmd24.scsi_cdb[6] =
4067 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4068 			pkt->cmd24.scsi_cdb[7] =
4069 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4070 			pkt->cmd24.scsi_cdb[8] =
4071 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4072 			pkt->cmd24.scsi_cdb[9] =
4073 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4074 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4075 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4076 				    + cnt, 4);
4077 			}
4078 
4079 			/* Set tag queue control flags */
4080 			pkt->cmd24.task = TA_STAG;
4081 
4082 			/* Set transfer direction. */
4083 			pkt->cmd24.control_flags = CF_RD;
4084 
4085 			/* Set data segment count. */
4086 			pkt->cmd24.dseg_count = LE_16(1);
4087 
4088 			/* Load total byte count. */
4089 			/* Load data descriptor. */
4090 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4091 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4092 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4093 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4094 			pkt->cmd24.total_byte_count =
4095 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4096 			pkt->cmd24.dseg_0_length =
4097 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4098 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4099 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4100 			pkt->cmd3.entry_count = 1;
4101 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4102 				pkt->cmd3.target_l = LSB(tq->loop_id);
4103 				pkt->cmd3.target_h = MSB(tq->loop_id);
4104 			} else {
4105 				pkt->cmd3.target_h = LSB(tq->loop_id);
4106 			}
4107 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4108 			pkt->cmd3.timeout = LE_16(15);
4109 			pkt->cmd3.dseg_count = LE_16(1);
4110 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4111 			pkt->cmd3.scsi_cdb[6] =
4112 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4113 			pkt->cmd3.scsi_cdb[7] =
4114 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4115 			pkt->cmd3.scsi_cdb[8] =
4116 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4117 			pkt->cmd3.scsi_cdb[9] =
4118 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4119 			pkt->cmd3.byte_count =
4120 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4121 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4122 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4123 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4124 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4125 			pkt->cmd3.dseg_0_length =
4126 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4127 		} else {
4128 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4129 			pkt->cmd.entry_count = 1;
4130 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4131 				pkt->cmd.target_l = LSB(tq->loop_id);
4132 				pkt->cmd.target_h = MSB(tq->loop_id);
4133 			} else {
4134 				pkt->cmd.target_h = LSB(tq->loop_id);
4135 			}
4136 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4137 			pkt->cmd.timeout = LE_16(15);
4138 			pkt->cmd.dseg_count = LE_16(1);
4139 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4140 			pkt->cmd.scsi_cdb[6] =
4141 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4142 			pkt->cmd.scsi_cdb[7] =
4143 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4144 			pkt->cmd.scsi_cdb[8] =
4145 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4146 			pkt->cmd.scsi_cdb[9] =
4147 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4148 			pkt->cmd.byte_count =
4149 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4150 			pkt->cmd.dseg_0_address = (uint32_t)
4151 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4152 			pkt->cmd.dseg_0_length =
4153 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4154 		}
4155 
4156 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4157 		    sizeof (ql_mbx_iocb_t));
4158 
4159 		/* Sync in coming DMA buffer. */
4160 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4161 		    DDI_DMA_SYNC_FORKERNEL);
4162 		/* Copy in coming DMA data. */
4163 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4164 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4165 
4166 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4167 			pkt->sts24.entry_status = (uint8_t)
4168 			    (pkt->sts24.entry_status & 0x3c);
4169 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4170 			scsi_status_h = pkt->sts24.scsi_status_h;
4171 			scsi_status_l = pkt->sts24.scsi_status_l;
4172 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4173 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4174 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4175 		} else {
4176 			pkt->sts.entry_status = (uint8_t)
4177 			    (pkt->sts.entry_status & 0x7e);
4178 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4179 			scsi_status_h = pkt->sts.scsi_status_h;
4180 			scsi_status_l = pkt->sts.scsi_status_l;
4181 			reqs = &pkt->sts.req_sense_data[0];
4182 		}
4183 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4184 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4185 			    pkt->sts.entry_status, tq->d_id.b24);
4186 			rval = QL_FUNCTION_PARAMETER_ERROR;
4187 		}
4188 
4189 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4190 		    scsi_status_l & STATUS_CHECK) {
4191 			/* Device underrun, treat as OK. */
4192 			if (rval == QL_SUCCESS &&
4193 			    comp_status == CS_DATA_UNDERRUN &&
4194 			    scsi_status_h & FCP_RESID_UNDER) {
4195 				break;
4196 			}
4197 
4198 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4199 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4200 			    comp_status, scsi_status_h, scsi_status_l);
4201 
4202 			if (rval == QL_SUCCESS) {
4203 				if ((comp_status == CS_TIMEOUT) ||
4204 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4205 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4206 					rval = QL_FUNCTION_TIMEOUT;
4207 					break;
4208 				}
4209 				rval = QL_FUNCTION_FAILED;
4210 			} else if (rval == QL_ABORTED) {
4211 				break;
4212 			}
4213 
4214 			if (scsi_status_l & STATUS_CHECK) {
4215 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4216 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4217 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4218 				    reqs[1], reqs[2], reqs[3], reqs[4],
4219 				    reqs[5], reqs[6], reqs[7], reqs[8],
4220 				    reqs[9], reqs[10], reqs[11], reqs[12],
4221 				    reqs[13], reqs[14], reqs[15], reqs[16],
4222 				    reqs[17]);
4223 			}
4224 		} else {
4225 			break;
4226 		}
4227 		bzero((caddr_t)pkt, pkt_size);
4228 	}
4229 
4230 	if (rval != QL_SUCCESS) {
4231 		EL(ha, "failed=%xh\n", rval);
4232 		rval = 0;
4233 	} else {
4234 		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
4235 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
4236 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4237 	}
4238 
4239 	kmem_free(pkt, pkt_size);
4240 	ql_free_dma_resource(ha, &dma_mem);
4241 
4242 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4243 
4244 	return (rval);
4245 }
4246 
4247 /*
4248  * ql_inq_scan
4249  *	Get numbers of LUNS using inquiry command.
4250  *
4251  * Input:
4252  *	ha:		adapter state pointer.
4253  *	tq:		target queue pointer.
4254  *	count:		scan for the number of existing LUNs.
4255  *
4256  * Returns:
4257  *	Number of LUNs.
4258  *
4259  * Context:
4260  *	Kernel context.
4261  */
4262 static int
4263 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4264 {
4265 	int		lun, cnt, rval;
4266 	ql_mbx_iocb_t	*pkt;
4267 	uint8_t		*inq;
4268 	uint32_t	pkt_size;
4269 
4270 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4271 
4272 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4273 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4274 	if (pkt == NULL) {
4275 		EL(ha, "failed, kmem_zalloc\n");
4276 		return (0);
4277 	}
4278 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4279 
4280 	cnt = 0;
4281 	for (lun = 0; lun < MAX_LUNS; lun++) {
4282 
4283 		if (DRIVER_SUSPENDED(ha)) {
4284 			rval = QL_LOOP_DOWN;
4285 			cnt = 0;
4286 			break;
4287 		}
4288 
4289 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4290 		if (rval == QL_SUCCESS) {
4291 			switch (*inq) {
4292 			case DTYPE_DIRECT:
4293 			case DTYPE_PROCESSOR:	/* Appliance. */
4294 			case DTYPE_WORM:
4295 			case DTYPE_RODIRECT:
4296 			case DTYPE_SCANNER:
4297 			case DTYPE_OPTICAL:
4298 			case DTYPE_CHANGER:
4299 			case DTYPE_ESI:
4300 				cnt++;
4301 				break;
4302 			case DTYPE_SEQUENTIAL:
4303 				cnt++;
4304 				tq->flags |= TQF_TAPE_DEVICE;
4305 				break;
4306 			default:
4307 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4308 				    "unsupported device id=%xh, lun=%d, "
4309 				    "type=%xh\n", ha->instance, tq->loop_id,
4310 				    lun, *inq);
4311 				break;
4312 			}
4313 
4314 			if (*inq == DTYPE_ESI || cnt >= count) {
4315 				break;
4316 			}
4317 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4318 			cnt = 0;
4319 			break;
4320 		}
4321 	}
4322 
4323 	kmem_free(pkt, pkt_size);
4324 
4325 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4326 
4327 	return (cnt);
4328 }
4329 
4330 /*
4331  * ql_inq
4332  *	Issue inquiry command.
4333  *
4334  * Input:
4335  *	ha:		adapter state pointer.
4336  *	tq:		target queue pointer.
4337  *	lun:		LUN number.
4338  *	pkt:		command and buffer pointer.
4339  *	inq_len:	amount of inquiry data.
4340  *
4341  * Returns:
4342  *	ql local function return status code.
4343  *
4344  * Context:
4345  *	Kernel context.
4346  */
static int
ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
    uint8_t inq_len)
{
	dma_mem_t	dma_mem;
	int		rval, retries;
	uint32_t	pkt_size, cnt;
	uint16_t	comp_status;
	uint8_t		scsi_status_h, scsi_status_l, *reqs;
	caddr_t		inq_data;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, loop down\n");
		return (QL_FUNCTION_TIMEOUT);
	}

	/* Caller's buffer holds the IOCB followed by the inquiry data. */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
	bzero((caddr_t)pkt, pkt_size);

	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		/*
		 * NOTE(review): returns 0 here, not a QL_* status code as
		 * the header comment promises -- confirm callers treat 0
		 * as failure.
		 */
		return (0);
	}

	/* Build and issue the inquiry IOCB, retrying up to 4 times. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx/25xx/81xx: command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number */
			pkt->cmd24.fcp_lun[2] = LSB(lun);
			pkt->cmd24.fcp_lun[3] = MSB(lun);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd24.scsi_cdb[4] = inq_len;
			/* Firmware wants the CDB byte-swapped 4 at a time. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			pkt->cmd24.total_byte_count = LE_32(inq_len);

			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_length = LE_32(inq_len);
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 64-bit addressing: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			/* cnt set here but not used afterward. */
			cnt = CMD_TYPE_3_DATA_SEGMENTS;

			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(lun);
			pkt->cmd3.lun_h = MSB(lun);
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd3.scsi_cdb[4] = inq_len;
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.byte_count = LE_32(inq_len);
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length = LE_32(inq_len);
		} else {
			/* 32-bit addressing: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			/* cnt set here but not used afterward. */
			cnt = CMD_TYPE_2_DATA_SEGMENTS;

			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(lun);
			pkt->cmd.lun_h = MSB(lun);
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd.scsi_cdb[4] = inq_len;
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.byte_count = LE_32(inq_len);
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length = LE_32(inq_len);
		}

/*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming IOCB DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract completion status from the format-specific reply. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* Mask off the interesting entry-status bits. */
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* Port-level failures are not worth a retry. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Success -- inquiry data is now at inq_data. */
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
4545 
4546 /*
4547  * ql_get_buffer_data
 *	Copies data from user space to kernel buffer.
4549  *
4550  * Input:
4551  *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
4553  *	size:	Amount of data.
4554  *	mode:	flags.
4555  *
4556  * Returns:
4557  *	Returns number of bytes transferred.
4558  *
4559  * Context:
4560  *	Kernel context.
4561  */
4562 static uint32_t
4563 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4564 {
4565 	uint32_t	cnt;
4566 
4567 	for (cnt = 0; cnt < size; cnt++) {
4568 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4569 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4570 			break;
4571 		}
4572 	}
4573 
4574 	return (cnt);
4575 }
4576 
4577 /*
4578  * ql_send_buffer_data
 *	Copies data from kernel buffer to user space.
4580  *
4581  * Input:
 *	src:	Kernel source buffer address.
4583  *	dst:	User destination buffer address.
4584  *	size:	Amount of data.
4585  *	mode:	flags.
4586  *
4587  * Returns:
4588  *	Returns number of bytes transferred.
4589  *
4590  * Context:
4591  *	Kernel context.
4592  */
4593 static uint32_t
4594 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4595 {
4596 	uint32_t	cnt;
4597 
4598 	for (cnt = 0; cnt < size; cnt++) {
4599 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4600 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4601 			break;
4602 		}
4603 	}
4604 
4605 	return (cnt);
4606 }
4607 
4608 /*
4609  * ql_find_port
4610  *	Locates device queue.
4611  *
4612  * Input:
4613  *	ha:	adapter state pointer.
4614  *	name:	device port name.
4615  *
4616  * Returns:
4617  *	Returns target queue pointer.
4618  *
4619  * Context:
4620  *	Kernel context.
4621  */
4622 static ql_tgt_t *
4623 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4624 {
4625 	ql_link_t	*link;
4626 	ql_tgt_t	*tq;
4627 	uint16_t	index;
4628 
4629 	/* Scan port list for requested target */
4630 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4631 		for (link = ha->dev[index].first; link != NULL;
4632 		    link = link->next) {
4633 			tq = link->base_address;
4634 
4635 			switch (type) {
4636 			case QLNT_LOOP_ID:
4637 				if (bcmp(name, &tq->loop_id,
4638 				    sizeof (uint16_t)) == 0) {
4639 					return (tq);
4640 				}
4641 				break;
4642 			case QLNT_PORT:
4643 				if (bcmp(name, tq->port_name, 8) == 0) {
4644 					return (tq);
4645 				}
4646 				break;
4647 			case QLNT_NODE:
4648 				if (bcmp(name, tq->node_name, 8) == 0) {
4649 					return (tq);
4650 				}
4651 				break;
4652 			case QLNT_PID:
4653 				if (bcmp(name, tq->d_id.r.d_id,
4654 				    sizeof (tq->d_id.r.d_id)) == 0) {
4655 					return (tq);
4656 				}
4657 				break;
4658 			default:
4659 				EL(ha, "failed, invalid type=%d\n",  type);
4660 				return (NULL);
4661 			}
4662 		}
4663 	}
4664 
4665 	return (NULL);
4666 }
4667 
4668 /*
4669  * ql_24xx_flash_desc
4670  *	Get flash descriptor table.
4671  *
4672  * Input:
4673  *	ha:		adapter state pointer.
4674  *
4675  * Returns:
4676  *	ql local function return status code.
4677  *
4678  * Context:
4679  *	Kernel context.
4680  */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	flash_desc_t	*fdesc;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* No known descriptor table location on this adapter. */
	if (ha->flash_desc_addr == 0) {
		QL_PRINT_9(CE_CONT, "(%d): desc ptr=0\n", ha->instance);
		return (QL_FUNCTION_FAILED);
	}

	/* NOTE(review): KM_SLEEP allocations block rather than fail. */
	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	/* Read the table; flash_desc_addr is a 32-bit-word address (<< 2). */
	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
	    ha->flash_desc_addr << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	/*
	 * Sum the table as little-endian 16-bit words; a valid table
	 * checksums to zero (tested below).
	 */
	chksum = 0;
	bp = (uint16_t *)fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp++;
		LITTLE_ENDIAN_16(&data);
		chksum += data;
	}

	/* Convert multi-byte fields from flash (little-endian) byte order. */
	LITTLE_ENDIAN_32(&fdesc->flash_valid);
	LITTLE_ENDIAN_16(&fdesc->flash_version);
	LITTLE_ENDIAN_16(&fdesc->flash_len);
	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
	LITTLE_ENDIAN_16(&fdesc->flash_id);
	LITTLE_ENDIAN_32(&fdesc->block_size);
	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
	LITTLE_ENDIAN_32(&fdesc->flash_size);
	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
	LITTLE_ENDIAN_32(&fdesc->read_timeout);

	/* flash size in desc table is in 1024 bytes */
	fdesc->flash_size = fdesc->flash_size * 0x400;

	/* Reject a table with a bad checksum, magic, or version. */
	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
	    fdesc->flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	/* Publish the validated table in the per-adapter xioctl state. */
	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
	kmem_free(fdesc, sizeof (flash_desc_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
4746 
4747 /*
4748  * ql_setup_flash
4749  *	Gets the manufacturer and id number of the flash chip, and
4750  *	sets up the size parameter.
4751  *
4752  * Input:
4753  *	ha:	adapter state pointer.
4754  *
4755  * Returns:
4756  *	int:	ql local function return status code.
4757  *
4758  * Context:
4759  *	Kernel context.
4760  */
static int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already identified -- nothing to do. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	/* 2200 adapters need a subsystem vendor id to proceed. */
	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_258081)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_fcode.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->xioctl->fdesc.flash_size = 0x800000;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->xioctl->fdesc.flash_size = 0x200000;
		} else {
			ha->xioctl->fdesc.flash_size = 0x400000;
		}

		/* Prefer the descriptor table stored in flash. */
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021: assume a fixed Winbond 8MB part. */
			xp->fdesc.flash_manuf = WINBOND_FLASH;
			xp->fdesc.flash_id = WINBOND_FLASHID;
			xp->fdesc.flash_len = 0x17;
		} else {
			(void) ql_24xx_flash_id(ha);
		}

	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/*
		 * Legacy parts: issue the JEDEC software-ID command
		 * sequence directly to read manufacturer and device id.
		 */
		ql_flash_enable(ha);

		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		/* SBUS cards use different command/ID addresses. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Return the chip to read mode (0xf0 = reset command). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/* Map manufacturer/device id to part parameters. */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* flash_len encodes log2 of the part size. */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			/* This part uses smaller blocks and erase cmd 0x52. */
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			/* Atmel parts need explicit sector (un)protect. */
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/* Try flash table later. */
	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "no default id\n");
		return (QL_SUCCESS);
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}
5016 
5017 /*
5018  * ql_flash_fcode_load
5019  *	Loads fcode data into flash from application.
5020  *
5021  * Input:
5022  *	ha:	adapter state pointer.
5023  *	bp:	user buffer address.
5024  *	size:	user buffer size.
5025  *	mode:	flags
5026  *
5027  * Returns:
5028  *
5029  * Context:
5030  *	Kernel context.
5031  */
5032 static int
5033 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5034     int mode)
5035 {
5036 	uint8_t		*bfp;
5037 	ql_xioctl_t	*xp = ha->xioctl;
5038 	int		rval = 0;
5039 
5040 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5041 
5042 	if (bsize > xp->fdesc.flash_size) {
5043 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5044 		    xp->fdesc.flash_size);
5045 		return (ENOMEM);
5046 	}
5047 
5048 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5049 		EL(ha, "failed, kmem_zalloc\n");
5050 		rval = ENOMEM;
5051 	} else  {
5052 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5053 			EL(ha, "failed, ddi_copyin\n");
5054 			rval = EFAULT;
5055 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5056 			EL(ha, "failed, load_fcode\n");
5057 			rval = EFAULT;
5058 		} else {
5059 			/* Reset caches on all adapter instances. */
5060 			ql_update_flash_caches(ha);
5061 			rval = 0;
5062 		}
5063 		kmem_free(bfp, bsize);
5064 	}
5065 
5066 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5067 
5068 	return (rval);
5069 }
5070 
5071 /*
5072  * ql_load_fcode
5073  *	Loads fcode in to flash.
5074  *
5075  * Input:
5076  *	ha:	adapter state pointer.
5077  *	dp:	data pointer.
5078  *	size:	data length.
5079  *	addr:	flash byte address.
5080  *
5081  * Returns:
5082  *	ql local function return status code.
5083  *
5084  * Context:
5085  *	Kernel context.
5086  */
int
ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
{
	uint32_t	cnt;
	int		rval;

	/* 24xx and later parts use a different flash programming path. */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, addr));
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * sbus has an additional check to make
		 * sure they don't brick the HBA.
		 */
		if (dp[0] != 0xf1) {
			EL(ha, "failed, incorrect fcode for sbus\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}

	/* Hold the global hardware lock across enable/erase/program. */
	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, 0);

	if (rval == QL_SUCCESS) {
		/* Write fcode data to flash. */
		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				drv_usecwait(1);
			}
			/* Program one byte; stop on the first failure. */
			rval = ql_program_flash_address(ha, addr++, *dp++);
			if (rval != QL_SUCCESS)
				break;
		}
	}

	/* Always restore read mode and drop the lock. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5143 
5144 /*
5145  * ql_flash_fcode_dump
5146  *	Dumps FLASH to application.
5147  *
5148  * Input:
5149  *	ha:	adapter state pointer.
5150  *	bp:	user buffer address.
5151  *	bsize:	user buffer size
5152  *	faddr:	flash byte address
5153  *	mode:	flags
5154  *
5155  * Returns:
5156  *
5157  * Context:
5158  *	Kernel context.
5159  */
5160 static int
5161 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5162     uint32_t faddr, int mode)
5163 {
5164 	uint8_t		*bfp;
5165 	int		rval;
5166 	ql_xioctl_t	*xp = ha->xioctl;
5167 
5168 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5169 
5170 	/* adjust max read size to flash size */
5171 	if (bsize > xp->fdesc.flash_size) {
5172 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5173 		    xp->fdesc.flash_size);
5174 		bsize = xp->fdesc.flash_size;
5175 	}
5176 
5177 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5178 		EL(ha, "failed, kmem_zalloc\n");
5179 		rval = ENOMEM;
5180 	} else {
5181 		/* Dump Flash fcode. */
5182 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5183 
5184 		if (rval != QL_SUCCESS) {
5185 			EL(ha, "failed, dump_fcode = %x\n", rval);
5186 			rval = EFAULT;
5187 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5188 			EL(ha, "failed, ddi_copyout\n");
5189 			rval = EFAULT;
5190 		} else {
5191 			rval = 0;
5192 		}
5193 		kmem_free(bfp, bsize);
5194 	}
5195 
5196 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5197 
5198 	return (rval);
5199 }
5200 
5201 /*
5202  * ql_dump_fcode
5203  *	Dumps fcode from flash.
5204  *
5205  * Input:
5206  *	ha:		adapter state pointer.
5207  *	dp:		data pointer.
5208  *	size:		data length in bytes.
5209  *	startpos:	starting position in flash (byte address).
5210  *
5211  * Returns:
5212  *	ql local function return status code.
5213  *
5214  * Context:
5215  *	Kernel context.
5216  *
5217  */
5218 int
5219 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5220     uint32_t startpos)
5221 {
5222 	uint32_t	cnt, data, addr;
5223 	uint8_t		bp[4], *src;
5224 	int		fp_rval, rval = QL_SUCCESS;
5225 	dma_mem_t	mem;
5226 
5227 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5228 
5229 	/* make sure startpos+size doesn't exceed flash */
5230 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5231 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5232 		    size, startpos, ha->xioctl->fdesc.flash_size);
5233 		return (QL_FUNCTION_PARAMETER_ERROR);
5234 	}
5235 
5236 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
5237 		/* check start addr is 32 bit aligned for 24xx */
5238 		if ((startpos & 0x3) != 0) {
5239 			rval = ql_24xx_read_flash(ha,
5240 			    ha->flash_data_addr | startpos >> 2, &data);
5241 			if (rval != QL_SUCCESS) {
5242 				EL(ha, "failed2, rval = %xh\n", rval);
5243 				return (rval);
5244 			}
5245 			bp[0] = LSB(LSW(data));
5246 			bp[1] = MSB(LSW(data));
5247 			bp[2] = LSB(MSW(data));
5248 			bp[3] = MSB(MSW(data));
5249 			while (size && startpos & 0x3) {
5250 				*dp++ = bp[startpos & 0x3];
5251 				startpos++;
5252 				size--;
5253 			}
5254 			if (size == 0) {
5255 				QL_PRINT_9(CE_CONT, "(%d): done2\n",
5256 				    ha->instance);
5257 				return (rval);
5258 			}
5259 		}
5260 
5261 		/* adjust 24xx start addr for 32 bit words */
5262 		addr = startpos / 4 | ha->flash_data_addr;
5263 	}
5264 
5265 	bzero(&mem, sizeof (dma_mem_t));
5266 	/* Check for Fast page is supported */
5267 	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
5268 	    (CFG_IST(ha, CFG_CTRL_2581))) {
5269 		fp_rval = QL_SUCCESS;
5270 		/* Setup DMA buffer. */
5271 		rval = ql_get_dma_mem(ha, &mem, size,
5272 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
5273 		if (rval != QL_SUCCESS) {
5274 			EL(ha, "failed, ql_get_dma_mem=%xh\n",
5275 			    rval);
5276 			return (ENOMEM);
5277 		}
5278 	} else {
5279 		fp_rval = QL_NOT_SUPPORTED;
5280 	}
5281 
5282 	GLOBAL_HW_LOCK();
5283 
5284 	/* Enable Flash Read/Write. */
5285 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
5286 		ql_flash_enable(ha);
5287 	}
5288 
5289 	/* Read fcode data from flash. */
5290 	while (size) {
5291 		/* Allow other system activity. */
5292 		if (size % 0x1000 == 0) {
5293 			ql_delay(ha, 100000);
5294 		}
5295 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5296 			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
5297 				cnt = (size + 3) >> 2;
5298 				fp_rval = ql_rd_risc_ram(ha, addr,
5299 				    mem.cookie.dmac_laddress, cnt);
5300 				if (fp_rval == QL_SUCCESS) {
5301 					for (src = mem.bp; size; size--) {
5302 						*dp++ = *src++;
5303 					}
5304 					addr += cnt;
5305 					continue;
5306 				}
5307 			}
5308 			rval = ql_24xx_read_flash(ha, addr++,
5309 			    &data);
5310 			if (rval != QL_SUCCESS) {
5311 				break;
5312 			}
5313 			bp[0] = LSB(LSW(data));
5314 			bp[1] = MSB(LSW(data));
5315 			bp[2] = LSB(MSW(data));
5316 			bp[3] = MSB(MSW(data));
5317 			for (cnt = 0; size && cnt < 4; size--) {
5318 				*dp++ = bp[cnt++];
5319 			}
5320 		} else {
5321 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5322 			size--;
5323 		}
5324 	}
5325 
5326 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
5327 		ql_flash_disable(ha);
5328 	}
5329 
5330 	GLOBAL_HW_UNLOCK();
5331 
5332 	if (mem.dma_handle != NULL) {
5333 		ql_free_dma_resource(ha, &mem);
5334 	}
5335 
5336 	if (rval != QL_SUCCESS) {
5337 		EL(ha, "failed, rval = %xh\n", rval);
5338 	} else {
5339 		/*EMPTY*/
5340 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5341 	}
5342 	return (rval);
5343 }
5344 
5345 /*
5346  * ql_program_flash_address
5347  *	Program flash address.
5348  *
5349  * Input:
5350  *	ha:	adapter state pointer.
5351  *	addr:	flash byte address.
5352  *	data:	data to be written to flash.
5353  *
5354  * Returns:
5355  *	ql local function return status code.
5356  *
5357  * Context:
5358  *	Kernel context.
5359  */
5360 static int
5361 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5362     uint8_t data)
5363 {
5364 	int	rval;
5365 
5366 	/* Write Program Command Sequence */
5367 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5368 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5369 		ql_write_flash_byte(ha, addr, data);
5370 	} else {
5371 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5372 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5373 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5374 		ql_write_flash_byte(ha, addr, data);
5375 	}
5376 
5377 	/* Wait for write to complete. */
5378 	rval = ql_poll_flash(ha, addr, data);
5379 
5380 	if (rval != QL_SUCCESS) {
5381 		EL(ha, "failed, rval=%xh\n", rval);
5382 	}
5383 	return (rval);
5384 }
5385 
5386 /*
5387  * ql_set_rnid_parameters
5388  *	Set RNID parameters.
5389  *
5390  * Input:
5391  *	ha:	adapter state pointer.
5392  *	cmd:	User space CT arguments pointer.
5393  *	mode:	flags.
5394  */
5395 static void
5396 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5397 {
5398 	EXT_SET_RNID_REQ	tmp_set;
5399 	EXT_RNID_DATA		*tmp_buf;
5400 	int			rval = 0;
5401 
5402 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5403 
5404 	if (DRIVER_SUSPENDED(ha)) {
5405 		EL(ha, "failed, LOOP_NOT_READY\n");
5406 		cmd->Status = EXT_STATUS_BUSY;
5407 		cmd->ResponseLen = 0;
5408 		return;
5409 	}
5410 
5411 	cmd->ResponseLen = 0; /* NO response to caller. */
5412 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5413 		/* parameter error */
5414 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5415 		    cmd->RequestLen);
5416 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5417 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5418 		cmd->ResponseLen = 0;
5419 		return;
5420 	}
5421 
5422 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5423 	    cmd->RequestLen, mode);
5424 	if (rval != 0) {
5425 		EL(ha, "failed, ddi_copyin\n");
5426 		cmd->Status = EXT_STATUS_COPY_ERR;
5427 		cmd->ResponseLen = 0;
5428 		return;
5429 	}
5430 
5431 	/* Allocate memory for command. */
5432 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5433 	if (tmp_buf == NULL) {
5434 		EL(ha, "failed, kmem_zalloc\n");
5435 		cmd->Status = EXT_STATUS_NO_MEMORY;
5436 		cmd->ResponseLen = 0;
5437 		return;
5438 	}
5439 
5440 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5441 	    (caddr_t)tmp_buf);
5442 	if (rval != QL_SUCCESS) {
5443 		/* error */
5444 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5445 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5446 		cmd->Status = EXT_STATUS_ERR;
5447 		cmd->ResponseLen = 0;
5448 		return;
5449 	}
5450 
5451 	/* Now set the requested params. */
5452 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5453 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5454 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5455 
5456 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5457 	    (caddr_t)tmp_buf);
5458 	if (rval != QL_SUCCESS) {
5459 		/* error */
5460 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5461 		cmd->Status = EXT_STATUS_ERR;
5462 		cmd->ResponseLen = 0;
5463 	}
5464 
5465 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5466 
5467 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5468 }
5469 
5470 /*
5471  * ql_get_rnid_parameters
5472  *	Get RNID parameters.
5473  *
5474  * Input:
5475  *	ha:	adapter state pointer.
5476  *	cmd:	User space CT arguments pointer.
5477  *	mode:	flags.
5478  */
5479 static void
5480 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5481 {
5482 	EXT_RNID_DATA	*tmp_buf;
5483 	uint32_t	rval;
5484 
5485 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5486 
5487 	if (DRIVER_SUSPENDED(ha)) {
5488 		EL(ha, "failed, LOOP_NOT_READY\n");
5489 		cmd->Status = EXT_STATUS_BUSY;
5490 		cmd->ResponseLen = 0;
5491 		return;
5492 	}
5493 
5494 	/* Allocate memory for command. */
5495 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5496 	if (tmp_buf == NULL) {
5497 		EL(ha, "failed, kmem_zalloc\n");
5498 		cmd->Status = EXT_STATUS_NO_MEMORY;
5499 		cmd->ResponseLen = 0;
5500 		return;
5501 	}
5502 
5503 	/* Send command */
5504 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5505 	    (caddr_t)tmp_buf);
5506 	if (rval != QL_SUCCESS) {
5507 		/* error */
5508 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5509 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5510 		cmd->Status = EXT_STATUS_ERR;
5511 		cmd->ResponseLen = 0;
5512 		return;
5513 	}
5514 
5515 	/* Copy the response */
5516 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5517 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5518 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5519 		EL(ha, "failed, ddi_copyout\n");
5520 		cmd->Status = EXT_STATUS_COPY_ERR;
5521 		cmd->ResponseLen = 0;
5522 	} else {
5523 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5524 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5525 	}
5526 
5527 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5528 }
5529 
5530 /*
5531  * ql_reset_statistics
5532  *	Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5533  *
5534  * Input:
5535  *	ha:	adapter state pointer.
5536  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5537  *
5538  * Returns:
5539  *	None, request status indicated in cmd->Status.
5540  *
5541  * Context:
5542  *	Kernel context.
5543  */
5544 static int
5545 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5546 {
5547 	ql_xioctl_t		*xp = ha->xioctl;
5548 	int			rval = 0;
5549 
5550 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5551 
5552 	if (DRIVER_SUSPENDED(ha)) {
5553 		EL(ha, "failed, LOOP_NOT_READY\n");
5554 		cmd->Status = EXT_STATUS_BUSY;
5555 		cmd->ResponseLen = 0;
5556 		return (QL_FUNCTION_SUSPENDED);
5557 	}
5558 
5559 	rval = ql_reset_link_status(ha);
5560 	if (rval != QL_SUCCESS) {
5561 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5562 		cmd->Status = EXT_STATUS_MAILBOX;
5563 		cmd->DetailStatus = rval;
5564 		cmd->ResponseLen = 0;
5565 	}
5566 
5567 	TASK_DAEMON_LOCK(ha);
5568 	xp->IosRequested = 0;
5569 	xp->BytesRequested = 0;
5570 	xp->IOInputRequests = 0;
5571 	xp->IOOutputRequests = 0;
5572 	xp->IOControlRequests = 0;
5573 	xp->IOInputMByteCnt = 0;
5574 	xp->IOOutputMByteCnt = 0;
5575 	xp->IOOutputByteCnt = 0;
5576 	xp->IOInputByteCnt = 0;
5577 	TASK_DAEMON_UNLOCK(ha);
5578 
5579 	INTR_LOCK(ha);
5580 	xp->ControllerErrorCount = 0;
5581 	xp->DeviceErrorCount = 0;
5582 	xp->TotalLipResets = 0;
5583 	xp->TotalInterrupts = 0;
5584 	INTR_UNLOCK(ha);
5585 
5586 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5587 
5588 	return (rval);
5589 }
5590 
5591 /*
5592  * ql_get_statistics
5593  *	Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5594  *
5595  * Input:
5596  *	ha:	adapter state pointer.
5597  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5598  *	mode:	flags.
5599  *
5600  * Returns:
5601  *	None, request status indicated in cmd->Status.
5602  *
5603  * Context:
5604  *	Kernel context.
5605  */
5606 static void
5607 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5608 {
5609 	EXT_HBA_PORT_STAT	ps = {0};
5610 	ql_link_stats_t		*ls;
5611 	int			rval;
5612 	ql_xioctl_t		*xp = ha->xioctl;
5613 	int			retry = 10;
5614 
5615 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5616 
5617 	while (ha->task_daemon_flags &
5618 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5619 		ql_delay(ha, 10000000);	/* 10 second delay */
5620 
5621 		retry--;
5622 
5623 		if (retry == 0) { /* effectively 100 seconds */
5624 			EL(ha, "failed, LOOP_NOT_READY\n");
5625 			cmd->Status = EXT_STATUS_BUSY;
5626 			cmd->ResponseLen = 0;
5627 			return;
5628 		}
5629 	}
5630 
5631 	/* Allocate memory for command. */
5632 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5633 	if (ls == NULL) {
5634 		EL(ha, "failed, kmem_zalloc\n");
5635 		cmd->Status = EXT_STATUS_NO_MEMORY;
5636 		cmd->ResponseLen = 0;
5637 		return;
5638 	}
5639 
5640 	/*
5641 	 * I think these are supposed to be port statistics
5642 	 * the loop ID or port ID should be in cmd->Instance.
5643 	 */
5644 	rval = ql_get_status_counts(ha, (uint16_t)
5645 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5646 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5647 	if (rval != QL_SUCCESS) {
5648 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5649 		    ha->loop_id);
5650 		cmd->Status = EXT_STATUS_MAILBOX;
5651 		cmd->DetailStatus = rval;
5652 		cmd->ResponseLen = 0;
5653 	} else {
5654 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5655 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5656 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5657 		    xp->IOOutputRequests + xp->IOControlRequests);
5658 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5659 		    xp->IOOutputMByteCnt);
5660 		ps.LipResetCount = xp->TotalLipResets;
5661 		ps.InterruptCount = xp->TotalInterrupts;
5662 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5663 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5664 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5665 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5666 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5667 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5668 
5669 		rval = ddi_copyout((void *)&ps,
5670 		    (void *)(uintptr_t)cmd->ResponseAdr,
5671 		    sizeof (EXT_HBA_PORT_STAT), mode);
5672 		if (rval != 0) {
5673 			EL(ha, "failed, ddi_copyout\n");
5674 			cmd->Status = EXT_STATUS_COPY_ERR;
5675 			cmd->ResponseLen = 0;
5676 		} else {
5677 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5678 		}
5679 	}
5680 
5681 	kmem_free(ls, sizeof (ql_link_stats_t));
5682 
5683 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5684 }
5685 
5686 /*
5687  * ql_get_statistics_fc
5688  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5689  *
5690  * Input:
5691  *	ha:	adapter state pointer.
5692  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5693  *	mode:	flags.
5694  *
5695  * Returns:
5696  *	None, request status indicated in cmd->Status.
5697  *
5698  * Context:
5699  *	Kernel context.
5700  */
5701 static void
5702 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5703 {
5704 	EXT_HBA_PORT_STAT	ps = {0};
5705 	ql_link_stats_t		*ls;
5706 	int			rval;
5707 	uint16_t		qlnt;
5708 	EXT_DEST_ADDR		pextdestaddr;
5709 	uint8_t			*name;
5710 	ql_tgt_t		*tq = NULL;
5711 	int			retry = 10;
5712 
5713 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5714 
5715 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5716 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5717 		EL(ha, "failed, ddi_copyin\n");
5718 		cmd->Status = EXT_STATUS_COPY_ERR;
5719 		cmd->ResponseLen = 0;
5720 		return;
5721 	}
5722 
5723 	qlnt = QLNT_PORT;
5724 	name = pextdestaddr.DestAddr.WWPN;
5725 
5726 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5727 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5728 	    name[5], name[6], name[7]);
5729 
5730 	tq = ql_find_port(ha, name, qlnt);
5731 
5732 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5733 		EL(ha, "failed, fc_port not found\n");
5734 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5735 		cmd->ResponseLen = 0;
5736 		return;
5737 	}
5738 
5739 	while (ha->task_daemon_flags &
5740 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE  | DRIVER_STALL)) {
5741 		ql_delay(ha, 10000000);	/* 10 second delay */
5742 
5743 		retry--;
5744 
5745 		if (retry == 0) { /* effectively 100 seconds */
5746 			EL(ha, "failed, LOOP_NOT_READY\n");
5747 			cmd->Status = EXT_STATUS_BUSY;
5748 			cmd->ResponseLen = 0;
5749 			return;
5750 		}
5751 	}
5752 
5753 	/* Allocate memory for command. */
5754 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5755 	if (ls == NULL) {
5756 		EL(ha, "failed, kmem_zalloc\n");
5757 		cmd->Status = EXT_STATUS_NO_MEMORY;
5758 		cmd->ResponseLen = 0;
5759 		return;
5760 	}
5761 
5762 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5763 	    (caddr_t)ls, 0);
5764 	if (rval != QL_SUCCESS) {
5765 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5766 		    tq->d_id.b24);
5767 		cmd->Status = EXT_STATUS_MAILBOX;
5768 		cmd->DetailStatus = rval;
5769 		cmd->ResponseLen = 0;
5770 	} else {
5771 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5772 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5773 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5774 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5775 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5776 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5777 
5778 		rval = ddi_copyout((void *)&ps,
5779 		    (void *)(uintptr_t)cmd->ResponseAdr,
5780 		    sizeof (EXT_HBA_PORT_STAT), mode);
5781 
5782 		if (rval != 0) {
5783 			EL(ha, "failed, ddi_copyout\n");
5784 			cmd->Status = EXT_STATUS_COPY_ERR;
5785 			cmd->ResponseLen = 0;
5786 		} else {
5787 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5788 		}
5789 	}
5790 
5791 	kmem_free(ls, sizeof (ql_link_stats_t));
5792 
5793 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5794 }
5795 
5796 /*
5797  * ql_get_statistics_fc4
5798  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5799  *
5800  * Input:
5801  *	ha:	adapter state pointer.
5802  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5803  *	mode:	flags.
5804  *
5805  * Returns:
5806  *	None, request status indicated in cmd->Status.
5807  *
5808  * Context:
5809  *	Kernel context.
5810  */
5811 static void
5812 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5813 {
5814 	uint32_t		rval;
5815 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5816 	ql_xioctl_t		*xp = ha->xioctl;
5817 
5818 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5819 
5820 	fc4stats.InputRequests = xp->IOInputRequests;
5821 	fc4stats.OutputRequests = xp->IOOutputRequests;
5822 	fc4stats.ControlRequests = xp->IOControlRequests;
5823 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5824 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5825 
5826 	rval = ddi_copyout((void *)&fc4stats,
5827 	    (void *)(uintptr_t)cmd->ResponseAdr,
5828 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5829 
5830 	if (rval != 0) {
5831 		EL(ha, "failed, ddi_copyout\n");
5832 		cmd->Status = EXT_STATUS_COPY_ERR;
5833 		cmd->ResponseLen = 0;
5834 	} else {
5835 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5836 	}
5837 
5838 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5839 }
5840 
5841 /*
5842  * ql_set_led_state
5843  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5844  *
5845  * Input:
5846  *	ha:	adapter state pointer.
5847  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5848  *	mode:	flags.
5849  *
5850  * Returns:
5851  *	None, request status indicated in cmd->Status.
5852  *
5853  * Context:
5854  *	Kernel context.
5855  */
5856 static void
5857 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5858 {
5859 	EXT_BEACON_CONTROL	bstate;
5860 	uint32_t		rval;
5861 	ql_xioctl_t		*xp = ha->xioctl;
5862 
5863 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5864 
5865 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5866 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5867 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5868 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5869 		    " Len=%xh\n", cmd->RequestLen);
5870 		cmd->ResponseLen = 0;
5871 		return;
5872 	}
5873 
5874 	if (ha->device_id < 0x2300) {
5875 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5876 		cmd->DetailStatus = 0;
5877 		EL(ha, "done - failed, Invalid function for HBA model\n");
5878 		cmd->ResponseLen = 0;
5879 		return;
5880 	}
5881 
5882 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5883 	    cmd->RequestLen, mode);
5884 
5885 	if (rval != 0) {
5886 		cmd->Status = EXT_STATUS_COPY_ERR;
5887 		EL(ha, "done -  failed, ddi_copyin\n");
5888 		return;
5889 	}
5890 
5891 	switch (bstate.State) {
5892 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5893 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5894 			/* not quite an error -- LED state is already off */
5895 			cmd->Status = EXT_STATUS_OK;
5896 			EL(ha, "LED off request -- LED is already off\n");
5897 			break;
5898 		}
5899 
5900 		xp->ledstate.BeaconState = BEACON_OFF;
5901 		xp->ledstate.LEDflags = LED_ALL_OFF;
5902 
5903 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5904 			cmd->Status = EXT_STATUS_MAILBOX;
5905 		} else {
5906 			cmd->Status = EXT_STATUS_OK;
5907 		}
5908 		break;
5909 
5910 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5911 		if (xp->ledstate.BeaconState == BEACON_ON) {
5912 			/* not quite an error -- LED state is already on */
5913 			cmd->Status = EXT_STATUS_OK;
5914 			EL(ha, "LED on request  - LED is already on\n");
5915 			break;
5916 		}
5917 
5918 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5919 			cmd->Status = EXT_STATUS_MAILBOX;
5920 			break;
5921 		}
5922 
5923 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5924 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5925 		} else {
5926 			xp->ledstate.LEDflags = LED_GREEN;
5927 		}
5928 		xp->ledstate.BeaconState = BEACON_ON;
5929 
5930 		cmd->Status = EXT_STATUS_OK;
5931 		break;
5932 	default:
5933 		cmd->Status = EXT_STATUS_ERR;
5934 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5935 		break;
5936 	}
5937 
5938 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5939 }
5940 
5941 /*
5942  * ql_get_led_state
5943  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5944  *
5945  * Input:
5946  *	ha:	adapter state pointer.
5947  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5948  *	mode:	flags.
5949  *
5950  * Returns:
5951  *	None, request status indicated in cmd->Status.
5952  *
5953  * Context:
5954  *	Kernel context.
5955  */
5956 static void
5957 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5958 {
5959 	EXT_BEACON_CONTROL	bstate = {0};
5960 	uint32_t		rval;
5961 	ql_xioctl_t		*xp = ha->xioctl;
5962 
5963 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5964 
5965 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5966 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5967 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5968 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5969 		    "Len=%xh\n", cmd->ResponseLen);
5970 		cmd->ResponseLen = 0;
5971 		return;
5972 	}
5973 
5974 	if (ha->device_id < 0x2300) {
5975 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5976 		cmd->DetailStatus = 0;
5977 		EL(ha, "done - failed, Invalid function for HBA model\n");
5978 		cmd->ResponseLen = 0;
5979 		return;
5980 	}
5981 
5982 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5983 		cmd->Status = EXT_STATUS_BUSY;
5984 		EL(ha, "done -  failed, isp abort active\n");
5985 		cmd->ResponseLen = 0;
5986 		return;
5987 	}
5988 
5989 	/* inform the user of the current beacon state (off or on) */
5990 	bstate.State = xp->ledstate.BeaconState;
5991 
5992 	rval = ddi_copyout((void *)&bstate,
5993 	    (void *)(uintptr_t)cmd->ResponseAdr,
5994 	    sizeof (EXT_BEACON_CONTROL), mode);
5995 
5996 	if (rval != 0) {
5997 		EL(ha, "failed, ddi_copyout\n");
5998 		cmd->Status = EXT_STATUS_COPY_ERR;
5999 		cmd->ResponseLen = 0;
6000 	} else {
6001 		cmd->Status = EXT_STATUS_OK;
6002 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
6003 	}
6004 
6005 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6006 }
6007 
6008 /*
6009  * ql_blink_led
6010  *	Determine the next state of the LED and drive it
6011  *
6012  * Input:
6013  *	ha:	adapter state pointer.
6014  *
6015  * Context:
6016  *	Interrupt context.
6017  */
6018 void
6019 ql_blink_led(ql_adapter_state_t *ha)
6020 {
6021 	uint32_t		nextstate;
6022 	ql_xioctl_t		*xp = ha->xioctl;
6023 
6024 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6025 
6026 	if (xp->ledstate.BeaconState == BEACON_ON) {
6027 		/* determine the next led state */
6028 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
6029 			nextstate = (xp->ledstate.LEDflags) &
6030 			    (~(RD32_IO_REG(ha, gpiod)));
6031 		} else {
6032 			nextstate = (xp->ledstate.LEDflags) &
6033 			    (~(RD16_IO_REG(ha, gpiod)));
6034 		}
6035 
6036 		/* turn the led on or off */
6037 		ql_drive_led(ha, nextstate);
6038 	}
6039 
6040 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6041 }
6042 
6043 /*
6044  * ql_drive_led
6045  *	drive the led's as determined by LEDflags
6046  *
6047  * Input:
6048  *	ha:		adapter state pointer.
6049  *	LEDflags:	LED flags
6050  *
6051  * Context:
6052  *	Kernel/Interrupt context.
6053  */
6054 static void
6055 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
6056 {
6057 
6058 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6059 
6060 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
6061 
6062 		uint16_t	gpio_enable, gpio_data;
6063 
6064 		/* setup to send new data */
6065 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
6066 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
6067 		WRT16_IO_REG(ha, gpioe, gpio_enable);
6068 
6069 		/* read current data and clear out old led data */
6070 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
6071 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
6072 
6073 		/* set in the new led data. */
6074 		gpio_data = (uint16_t)(gpio_data | LEDflags);
6075 
6076 		/* write out the new led data */
6077 		WRT16_IO_REG(ha, gpiod, gpio_data);
6078 
6079 	} else if (CFG_IST(ha, CFG_CTRL_24258081)) {
6080 
6081 		uint32_t	gpio_data;
6082 
6083 		/* setup to send new data */
6084 		gpio_data = RD32_IO_REG(ha, gpiod);
6085 		gpio_data |= LED_MASK_UPDATE_24;
6086 		WRT32_IO_REG(ha, gpiod, gpio_data);
6087 
6088 		/* read current data and clear out old led data */
6089 		gpio_data = RD32_IO_REG(ha, gpiod);
6090 		gpio_data &= ~LED_MASK_COLORS_24;
6091 
6092 		/* set in the new led data */
6093 		gpio_data |= LEDflags;
6094 
6095 		/* write out the new led data */
6096 		WRT32_IO_REG(ha, gpiod, gpio_data);
6097 
6098 	} else {
6099 		EL(ha, "unsupported HBA: %xh", ha->device_id);
6100 	}
6101 
6102 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6103 }
6104 
6105 /*
6106  * ql_setup_led
6107  *	Setup LED for driver control
6108  *
6109  * Input:
6110  *	ha:	adapter state pointer.
6111  *
6112  * Context:
6113  *	Kernel/Interrupt context.
6114  */
6115 static uint32_t
6116 ql_setup_led(ql_adapter_state_t *ha)
6117 {
6118 	uint32_t	rval;
6119 	ql_mbx_data_t	mr;
6120 
6121 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6122 
6123 	/* decouple the LED control from the fw */
6124 	rval = ql_get_firmware_option(ha, &mr);
6125 	if (rval != QL_SUCCESS) {
6126 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6127 		return (rval);
6128 	}
6129 
6130 	/* set the appropriate options */
6131 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
6132 
6133 	/* send it back to the firmware */
6134 	rval = ql_set_firmware_option(ha, &mr);
6135 	if (rval != QL_SUCCESS) {
6136 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6137 		return (rval);
6138 	}
6139 
6140 	/* initally, turn the LED's off */
6141 	ql_drive_led(ha, LED_ALL_OFF);
6142 
6143 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6144 
6145 	return (rval);
6146 }
6147 
6148 /*
6149  * ql_wrapup_led
6150  *	Return LED control to the firmware
6151  *
6152  * Input:
6153  *	ha:	adapter state pointer.
6154  *
6155  * Context:
6156  *	Kernel/Interrupt context.
6157  */
6158 static uint32_t
6159 ql_wrapup_led(ql_adapter_state_t *ha)
6160 {
6161 	uint32_t	rval;
6162 	ql_mbx_data_t	mr;
6163 
6164 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6165 
6166 	/* Turn all LED's off */
6167 	ql_drive_led(ha, LED_ALL_OFF);
6168 
6169 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
6170 
6171 		uint32_t	gpio_data;
6172 
6173 		/* disable the LED update mask */
6174 		gpio_data = RD32_IO_REG(ha, gpiod);
6175 		gpio_data &= ~LED_MASK_UPDATE_24;
6176 
6177 		/* write out the data */
6178 		WRT32_IO_REG(ha, gpiod, gpio_data);
6179 	}
6180 
6181 	/* give LED control back to the f/w */
6182 	rval = ql_get_firmware_option(ha, &mr);
6183 	if (rval != QL_SUCCESS) {
6184 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6185 		return (rval);
6186 	}
6187 
6188 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6189 
6190 	rval = ql_set_firmware_option(ha, &mr);
6191 	if (rval != QL_SUCCESS) {
6192 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6193 		return (rval);
6194 	}
6195 
6196 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6197 
6198 	return (rval);
6199 }
6200 
6201 /*
6202  * ql_get_port_summary
6203  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
6204  *
6205  *	The EXT_IOCTL->RequestAdr points to a single
6206  *	UINT32 which identifies the device type.
6207  *
6208  * Input:
6209  *	ha:	adapter state pointer.
6210  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6211  *	mode:	flags.
6212  *
6213  * Returns:
6214  *	None, request status indicated in cmd->Status.
6215  *
6216  * Context:
6217  *	Kernel context.
6218  */
static void
ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DEVICEDATA		dd = {0};
	EXT_DEVICEDATA		*uddp;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint32_t		rlen, dev_type, index;
	int			rval = 0;
	EXT_DEVICEDATAENTRY	*uddep, *ddep;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Use the single embedded entry of the local EXT_DEVICEDATA as a
	 * staging area; each device entry is built here and then copied
	 * out to the caller's buffer individually.
	 */
	ddep = &dd.EntryList[0];

	/*
	 * Get the type of device the requestor is looking for.
	 *
	 * We ignore this for now.
	 */
	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&dev_type, sizeof (dev_type), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyin\n");
		return;
	}
	/*
	 * Count the number of entries to be returned. Count devices
	 * that are offline, but have been persistently bound.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}
			dd.TotalDevices++;
		}
	}
	/*
	 * Compute the number of entries that can be returned
	 * based upon the size of caller's response buffer.
	 */
	dd.ReturnListEntryCount = 0;
	if (dd.TotalDevices == 0) {
		/* Header only; EXT_DEVICEDATA embeds one entry, back it out */
		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
	} else {
		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
	}
	if (rlen > cmd->ResponseLen) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = rlen;
		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
		    rlen, cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = 0;
	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
	uddep = &uddp->EntryList[0];
	/* Second pass: same filter as the counting pass above. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}

			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));

			bcopy(tq->node_name, ddep->NodeWWN, 8);
			bcopy(tq->port_name, ddep->PortWWN, 8);

			ddep->PortID[0] = tq->d_id.b.domain;
			ddep->PortID[1] = tq->d_id.b.area;
			ddep->PortID[2] = tq->d_id.b.al_pa;

			/* The 8-byte WWPN doubles as the target address. */
			bcopy(tq->port_name,
			    (caddr_t)&ddep->TargetAddress.Target, 8);

			ddep->DeviceFlags = tq->flags;
			ddep->LoopID = tq->loop_id;
			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, ddep->TargetAddress.Target,
			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
			    ddep->NodeWWN[2], ddep->NodeWWN[3],
			    ddep->NodeWWN[4], ddep->NodeWWN[5],
			    ddep->NodeWWN[6], ddep->NodeWWN[7],
			    ddep->PortWWN[0], ddep->PortWWN[1],
			    ddep->PortWWN[2], ddep->PortWWN[3],
			    ddep->PortWWN[4], ddep->PortWWN[5],
			    ddep->PortWWN[6], ddep->PortWWN[7]);
			rval = ddi_copyout((void *)ddep, (void *)uddep,
			    sizeof (EXT_DEVICEDATAENTRY), mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
				break;
			}
			dd.ReturnListEntryCount++;
			uddep++;
			cmd->ResponseLen += (uint32_t)
			    sizeof (EXT_DEVICEDATAENTRY);
		}
	}
	/*
	 * Copy out the header fields (TotalDevices/ReturnListEntryCount)
	 * last; only the header portion is copied so the entries written
	 * above are not clobbered.
	 */
	rval = ddi_copyout((void *)&dd, (void *)uddp,
	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);

	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout-2\n");
	} else {
		/*
		 * NOTE(review): this accounts for the header using
		 * sizeof (EXT_DEVICEDATAENTRY) rather than the header
		 * size actually copied above (sizeof (EXT_DEVICEDATA) -
		 * sizeof (EXT_DEVICEDATAENTRY)) -- verify intended.
		 */
		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
6346 
6347 /*
6348  * ql_get_target_id
 *	Performs the EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6350  *
6351  * Input:
6352  *	ha:	adapter state pointer.
6353  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6354  *	mode:	flags.
6355  *
6356  * Returns:
6357  *	None, request status indicated in cmd->Status.
6358  *
6359  * Context:
6360  *	Kernel context.
6361  */
6362 static void
6363 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6364 {
6365 	uint32_t		rval;
6366 	uint16_t		qlnt;
6367 	EXT_DEST_ADDR		extdestaddr = {0};
6368 	uint8_t			*name;
6369 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6370 	ql_tgt_t		*tq;
6371 
6372 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6373 
6374 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6375 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6376 		EL(ha, "failed, ddi_copyin\n");
6377 		cmd->Status = EXT_STATUS_COPY_ERR;
6378 		cmd->ResponseLen = 0;
6379 		return;
6380 	}
6381 
6382 	qlnt = QLNT_PORT;
6383 	name = wwpn;
6384 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6385 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6386 	    name[5], name[6], name[7]);
6387 
6388 	tq = ql_find_port(ha, name, qlnt);
6389 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6390 		EL(ha, "failed, fc_port not found\n");
6391 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6392 		cmd->ResponseLen = 0;
6393 		return;
6394 	}
6395 
6396 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6397 
6398 	rval = ddi_copyout((void *)&extdestaddr,
6399 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6400 	if (rval != 0) {
6401 		EL(ha, "failed, ddi_copyout\n");
6402 		cmd->Status = EXT_STATUS_COPY_ERR;
6403 		cmd->ResponseLen = 0;
6404 	}
6405 
6406 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6407 }
6408 
6409 /*
6410  * ql_setup_fcache
6411  *	Populates selected flash sections into the cache
6412  *
6413  * Input:
6414  *	ha = adapter state pointer.
6415  *
6416  * Returns:
6417  *	ql local function return status code.
6418  *
6419  * Context:
6420  *	Kernel context.
6421  *
6422  * Note:
6423  *	Driver must be in stalled state prior to entering or
6424  *	add code to this function prior to calling ql_setup_flash()
6425  */
6426 int
6427 ql_setup_fcache(ql_adapter_state_t *ha)
6428 {
6429 	int		rval;
6430 	uint32_t	freadpos = 0;
6431 	uint32_t	fw_done = 0;
6432 	ql_fcache_t	*head = NULL;
6433 	ql_fcache_t	*tail = NULL;
6434 	ql_fcache_t	*ftmp;
6435 
6436 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6437 
6438 	CACHE_LOCK(ha);
6439 
6440 	/* If we already have populated it, rtn */
6441 	if (ha->fcache != NULL) {
6442 		CACHE_UNLOCK(ha);
6443 		EL(ha, "buffer already populated\n");
6444 		return (QL_SUCCESS);
6445 	}
6446 
6447 	ql_flash_nvram_defaults(ha);
6448 
6449 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6450 		CACHE_UNLOCK(ha);
6451 		EL(ha, "unable to setup flash; rval=%xh\n", rval);
6452 		return (rval);
6453 	}
6454 
6455 	while (freadpos != 0xffffffff) {
6456 		/* Allocate & populate this node */
6457 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6458 			EL(ha, "node alloc failed\n");
6459 			rval = QL_FUNCTION_FAILED;
6460 			break;
6461 		}
6462 
6463 		/* link in the new node */
6464 		if (head == NULL) {
6465 			head = tail = ftmp;
6466 		} else {
6467 			tail->next = ftmp;
6468 			tail = ftmp;
6469 		}
6470 
6471 		/* Do the firmware node first for 24xx/25xx's */
6472 		if (fw_done == 0) {
6473 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
6474 				freadpos = ha->flash_fw_addr << 2;
6475 			}
6476 			fw_done = 1;
6477 		}
6478 
6479 		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6480 		    freadpos)) != QL_SUCCESS) {
6481 			EL(ha, "failed, 24xx dump_fcode"
6482 			    " pos=%xh rval=%xh\n", freadpos, rval);
6483 			rval = QL_FUNCTION_FAILED;
6484 			break;
6485 		}
6486 
6487 		/* checkout the pci data / format */
6488 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6489 			EL(ha, "flash header incorrect\n");
6490 			rval = QL_FUNCTION_FAILED;
6491 			break;
6492 		}
6493 	}
6494 
6495 	if (rval != QL_SUCCESS) {
6496 		/* release all resources we have */
6497 		ftmp = head;
6498 		while (ftmp != NULL) {
6499 			tail = ftmp->next;
6500 			kmem_free(ftmp->buf, FBUFSIZE);
6501 			kmem_free(ftmp, sizeof (ql_fcache_t));
6502 			ftmp = tail;
6503 		}
6504 
6505 		EL(ha, "failed, done\n");
6506 	} else {
6507 		ha->fcache = head;
6508 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6509 	}
6510 	CACHE_UNLOCK(ha);
6511 
6512 	return (rval);
6513 }
6514 
6515 /*
6516  * ql_update_fcache
 *	re-populates updated flash into the fcache. If
 *	the fcache does not exist (e.g., flash was empty/invalid on
 *	boot), this routine will create and populate it.
6520  *
6521  * Input:
6522  *	ha	= adapter state pointer.
 *	bfp	= Pointer to flash buffer.
6524  *	bsize	= Size of flash buffer.
6525  *
6526  * Returns:
6527  *
6528  * Context:
6529  *	Kernel context.
6530  */
6531 void
6532 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6533 {
6534 	int		rval = QL_SUCCESS;
6535 	uint32_t	freadpos = 0;
6536 	uint32_t	fw_done = 0;
6537 	ql_fcache_t	*head = NULL;
6538 	ql_fcache_t	*tail = NULL;
6539 	ql_fcache_t	*ftmp;
6540 
6541 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6542 
6543 	while (freadpos != 0xffffffff) {
6544 
6545 		/* Allocate & populate this node */
6546 
6547 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6548 			EL(ha, "node alloc failed\n");
6549 			rval = QL_FUNCTION_FAILED;
6550 			break;
6551 		}
6552 
6553 		/* link in the new node */
6554 		if (head == NULL) {
6555 			head = tail = ftmp;
6556 		} else {
6557 			tail->next = ftmp;
6558 			tail = ftmp;
6559 		}
6560 
6561 		/* Do the firmware node first for 24xx's */
6562 		if (fw_done == 0) {
6563 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
6564 				freadpos = ha->flash_fw_addr << 2;
6565 			}
6566 			fw_done = 1;
6567 		}
6568 
6569 		/* read in first FBUFSIZE bytes of this flash section */
6570 		if (freadpos+FBUFSIZE > bsize) {
6571 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6572 			    freadpos, bsize);
6573 			rval = QL_FUNCTION_FAILED;
6574 			break;
6575 		}
6576 		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6577 
6578 		/* checkout the pci data / format */
6579 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6580 			EL(ha, "flash header incorrect\n");
6581 			rval = QL_FUNCTION_FAILED;
6582 			break;
6583 		}
6584 	}
6585 
6586 	if (rval != QL_SUCCESS) {
6587 		/*
6588 		 * release all resources we have
6589 		 */
6590 		ql_fcache_rel(head);
6591 		EL(ha, "failed, done\n");
6592 	} else {
6593 		/*
6594 		 * Release previous fcache resources and update with new
6595 		 */
6596 		CACHE_LOCK(ha);
6597 		ql_fcache_rel(ha->fcache);
6598 		ha->fcache = head;
6599 		CACHE_UNLOCK(ha);
6600 
6601 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6602 	}
6603 }
6604 
6605 /*
6606  * ql_setup_fnode
6607  *	Allocates fcache node
6608  *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	Pointer to the allocated fcache node (NULL = failed).
 *
6615  * Context:
6616  *	Kernel context.
6617  *
6618  * Note:
6619  *	Driver must be in stalled state prior to entering or
6620  *	add code to this function prior to calling ql_setup_flash()
6621  */
6622 static ql_fcache_t *
6623 ql_setup_fnode(ql_adapter_state_t *ha)
6624 {
6625 	ql_fcache_t	*fnode = NULL;
6626 
6627 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6628 	    KM_SLEEP))) == NULL) {
6629 		EL(ha, "fnode alloc failed\n");
6630 		fnode = NULL;
6631 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6632 	    KM_SLEEP))) == NULL) {
6633 		EL(ha, "buf alloc failed\n");
6634 		kmem_free(fnode, sizeof (ql_fcache_t));
6635 		fnode = NULL;
6636 	} else {
6637 		fnode->buflen = FBUFSIZE;
6638 	}
6639 
6640 	return (fnode);
6641 }
6642 
6643 /*
6644  * ql_fcache_rel
6645  *	Releases the fcache resources
6646  *
6647  * Input:
6648  *	ha	= adapter state pointer.
6649  *	head	= Pointer to fcache linked list
6650  *
6651  * Returns:
6652  *
6653  * Context:
6654  *	Kernel context.
6655  *
6656  */
6657 void
6658 ql_fcache_rel(ql_fcache_t *head)
6659 {
6660 	ql_fcache_t	*ftmp = head;
6661 	ql_fcache_t	*tail;
6662 
6663 	/* release all resources we have */
6664 	while (ftmp != NULL) {
6665 		tail = ftmp->next;
6666 		kmem_free(ftmp->buf, FBUFSIZE);
6667 		kmem_free(ftmp, sizeof (ql_fcache_t));
6668 		ftmp = tail;
6669 	}
6670 }
6671 
6672 /*
6673  * ql_update_flash_caches
6674  *	Updates driver flash caches
6675  *
6676  * Input:
6677  *	ha:	adapter state pointer.
6678  *
6679  * Context:
6680  *	Kernel context.
6681  */
6682 static void
6683 ql_update_flash_caches(ql_adapter_state_t *ha)
6684 {
6685 	uint32_t		len;
6686 	ql_link_t		*link;
6687 	ql_adapter_state_t	*ha2;
6688 
6689 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6690 
6691 	/* Get base path length. */
6692 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6693 		if (ha->devpath[len] == ',' ||
6694 		    ha->devpath[len] == '@') {
6695 			break;
6696 		}
6697 	}
6698 
6699 	/* Reset fcache on all adapter instances. */
6700 	for (link = ql_hba.first; link != NULL; link = link->next) {
6701 		ha2 = link->base_address;
6702 
6703 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6704 			continue;
6705 		}
6706 
6707 		CACHE_LOCK(ha2);
6708 		ql_fcache_rel(ha2->fcache);
6709 		ha2->fcache = NULL;
6710 
6711 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
6712 			if (ha2->vcache != NULL) {
6713 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6714 				ha2->vcache = NULL;
6715 			}
6716 		}
6717 		CACHE_UNLOCK(ha2);
6718 
6719 		(void) ql_setup_fcache(ha2);
6720 	}
6721 
6722 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6723 }
6724 
6725 /*
6726  * ql_get_fbuf
6727  *	Search the fcache list for the type specified
6728  *
6729  * Input:
6730  *	fptr	= Pointer to fcache linked list
6731  *	ftype	= Type of image to be returned.
6732  *
6733  * Returns:
6734  *	Pointer to ql_fcache_t.
6735  *	NULL means not found.
6736  *
6737  * Context:
6738  *	Kernel context.
6739  *
6740  *
6741  */
6742 ql_fcache_t *
6743 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6744 {
6745 	while (fptr != NULL) {
6746 		/* does this image meet criteria? */
6747 		if (ftype & fptr->type) {
6748 			break;
6749 		}
6750 		fptr = fptr->next;
6751 	}
6752 	return (fptr);
6753 }
6754 
6755 /*
6756  * ql_check_pci
6757  *
6758  *	checks the passed buffer for a valid pci signature and
6759  *	expected (and in range) pci length values.
6760  *
6761  *	For firmware type, a pci header is added since the image in
6762  *	the flash does not have one (!!!).
6763  *
6764  *	On successful pci check, nextpos adjusted to next pci header.
6765  *
6766  * Returns:
6767  *	-1 --> last pci image
6768  *	0 --> pci header valid
6769  *	1 --> pci header invalid.
6770  *
6771  * Context:
6772  *	Kernel context.
6773  */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;
	uint8_t		*pciinfo;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		/* SBUS cards carry a single fcode image, no PCI chain. */
		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/* Version string comes from the "version" devinfo prop. */
		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		/* Single image: terminate the scan. */
		*nextpos = 0xffffffff;

		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
		    ha->instance);

		return (0);
	}

	/* Firmware section (byte address = word address << 2)? */
	if (*nextpos == ha->flash_fw_addr << 2) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		fwh.signature[1] = PCI_HEADER1;
		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));

		fwd.signature[0] = 'P';
		fwd.signature[1] = 'C';
		fwd.signature[2] = 'I';
		fwd.signature[3] = 'R';
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/*
		 * Prepend the synthesized header and data structure,
		 * then the firmware image itself (truncated by the
		 * space the two structures consume).
		 */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		/*
		 * NOTE(review): bytes 19/23/27 are presumed to be the
		 * firmware major/minor/subminor version bytes at their
		 * post-header offsets -- confirm against the fw layout.
		 */
		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		/* Next read position depends on the controller family. */
		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			*nextpos = 0x200000;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			*nextpos = 0x80000;
		} else {
			*nextpos = 0;
		}
		kmem_free(buf, FBUFSIZE);

		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);

		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		QL_PRINT_9(CE_CONT, "(%d): last image\n", ha->instance);
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/*
			 * Past the last image sits the flash layout
			 * table pointer; process it and the flash
			 * descriptor before terminating the scan.
			 */
			ql_flash_layout_table(ha, *nextpos +
			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
			    8)) * PCI_SECTOR_SIZE);
			(void) ql_24xx_flash_desc(ha);
		}
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	/* Classify this image by its PCI data structure code type. */
	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
6945 
6946 /*
6947  * ql_flash_layout_table
6948  *	Obtains flash addresses from table
6949  *
6950  * Input:
6951  *	ha:		adapter state pointer.
6952  *	flt_paddr:	flash layout pointer address.
6953  *
6954  * Context:
6955  *	Kernel context.
6956  */
6957 static void
6958 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
6959 {
6960 	ql_flt_ptr_t	*fptr;
6961 	uint8_t		*bp;
6962 	int		rval;
6963 	uint32_t	len, faddr, cnt;
6964 	uint16_t	chksum, w16;
6965 
6966 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6967 
6968 	/* Process flash layout table header */
6969 	len = sizeof (ql_flt_ptr_t);
6970 	if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) {
6971 		EL(ha, "kmem_zalloc=null\n");
6972 		return;
6973 	}
6974 
6975 	/* Process pointer to flash layout table */
6976 	if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
6977 		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
6978 		    rval);
6979 		kmem_free(bp, len);
6980 		return;
6981 	}
6982 	fptr = (ql_flt_ptr_t *)bp;
6983 
6984 	/* Verify pointer to flash layout table. */
6985 	for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6986 		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6987 		chksum += w16;
6988 	}
6989 	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
6990 	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
6991 		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
6992 		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
6993 		kmem_free(bp, len);
6994 		return;
6995 	}
6996 	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
6997 	    fptr->addr[3]);
6998 
6999 	kmem_free(bp, len);
7000 
7001 	ql_process_flt(ha, faddr);
7002 
7003 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7004 }
7005 
7006 /*
7007  * ql_process_flt
7008  *	Obtains flash addresses from flash layout table
7009  *
7010  * Input:
7011  *	ha:	adapter state pointer.
7012  *	faddr:	flash layout table byte address.
7013  *
7014  * Context:
7015  *	Kernel context.
7016  */
static void
ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
{
	ql_flt_hdr_t	*fhdr;
	ql_flt_region_t	*frgn;
	uint8_t		*bp, *eaddr, nv_rg, vpd_rg;
	int		rval;
	uint32_t	len, cnt, fe_addr;
	uint16_t	chksum, w16;

	QL_PRINT_9(CE_CONT, "(%d): started faddr=%xh\n", ha->instance, faddr);

	/* Process flash layout table header */
	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return;
	}
	fhdr = (ql_flt_hdr_t *)bp;

	/* Process flash layout table. */
	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
	    QL_SUCCESS) {
		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
		return;
	}

	/* Verify flash layout table. */
	len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
	    sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
	if (len > FLASH_LAYOUT_TABLE_SIZE) {
		/* Out-of-range length: force the checksum test to fail. */
		chksum = 0xffff;
	} else {
		/* 16-bit words of a valid table sum to zero. */
		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
			chksum += w16;
		}
	}
	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
	/* Only version 1 tables are supported. */
	if (chksum != 0 || w16 != 1) {
		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
		return;
	}
	/* End of the region entries within the buffer. */
	eaddr = bp + len;

	/* Process Function/Port Configuration Map. */
	nv_rg = vpd_rg = 0;
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		uint16_t	i;
		uint8_t		*mbp = eaddr;
		ql_fp_cfg_map_t	*cmp = (ql_fp_cfg_map_t *)mbp;

		len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
		    cmp->hdr.len[1]));
		if (len > FLASH_LAYOUT_TABLE_SIZE) {
			/* Out-of-range length: force checksum failure. */
			chksum = 0xffff;
		} else {
			for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
				w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
				    mbp[cnt + 1]);
				chksum += w16;
			}
		}
		w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
		/* Map must checksum, be version 1, and be signed "FPCM". */
		if (chksum != 0 || w16 != 1 ||
		    cmp->hdr.Signature[0] != 'F' ||
		    cmp->hdr.Signature[1] != 'P' ||
		    cmp->hdr.Signature[2] != 'C' ||
		    cmp->hdr.Signature[3] != 'M') {
			EL(ha, "cfg_map chksum=%xh, version=%d, "
			    "sig=%c%c%c%c\n", chksum, w16,
			    cmp->hdr.Signature[0], cmp->hdr.Signature[1],
			    cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
		} else {
			cnt = (uint16_t)
			    (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
			    cmp->hdr.NumberEntries[1]));
			/* Locate entry for function. */
			for (i = 0; i < cnt; i++) {
				if (cmp->cfg[i].FunctionType == FT_FC &&
				    cmp->cfg[i].FunctionNumber[0] ==
				    ha->function_number &&
				    cmp->cfg[i].FunctionNumber[1] == 0) {
					nv_rg = cmp->cfg[i].ConfigRegion;
					vpd_rg = cmp->cfg[i].VpdRegion;
					break;
				}
			}

			/* Both regions must be present to use the map. */
			if (nv_rg == 0 || vpd_rg == 0) {
				EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
				    vpd_rg);
				nv_rg = vpd_rg = 0;
			}
		}
	}

	/* Process flash layout table regions */
	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
	    (uint8_t *)frgn < eaddr; frgn++) {
		/* Region start/end byte addresses, divided by 4 (>> 2). */
		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
		    frgn->beg_addr[2], frgn->beg_addr[3]);
		faddr >>= 2;
		fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
		    frgn->end_addr[2], frgn->end_addr[3]);
		fe_addr >>= 2;

		/* Record each region of interest in the adapter state. */
		switch (frgn->region) {
		case FLASH_8021_BOOTLOADER_REGION:
			ha->bootloader_addr = faddr;
			ha->bootloader_size = (fe_addr - faddr) + 1;
			QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, "
			    "size=%xh\n", ha->instance, faddr,
			    ha->bootloader_size);
			break;
		case FLASH_FW_REGION:
		case FLASH_8021_FW_REGION:
			ha->flash_fw_addr = faddr;
			ha->flash_fw_size = (fe_addr - faddr) + 1;
			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, "
			    "size=%xh\n", ha->instance, faddr,
			    ha->flash_fw_size);
			break;
		case FLASH_GOLDEN_FW_REGION:
		case FLASH_8021_GOLDEN_FW_REGION:
			ha->flash_golden_fw_addr = faddr;
			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
			    ha->instance, faddr);
			break;
		case FLASH_8021_VPD_REGION:
			if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_"
				    "addr=%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_VPD_0_REGION:
			/*
			 * When a cfg map supplied vpd_rg, only honor this
			 * region if it is the one the map selected;
			 * otherwise function 0 (non-8021) uses it.
			 */
			if (vpd_rg) {
				if (vpd_rg == FLASH_VPD_0_REGION) {
					ha->flash_vpd_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): vpd_rg  "
					    "flash_vpd_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (!(ha->flags & FUNCTION_1) &&
			    !(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
				    "\n", ha->instance, faddr);
			}
			break;
		case FLASH_NVRAM_0_REGION:
			/* Map-selected NVRAM region 0 implies function 0. */
			if (nv_rg) {
				if (nv_rg == FLASH_NVRAM_0_REGION) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags &= ~FUNCTION_1;
					ADAPTER_STATE_UNLOCK(ha);
					ha->flash_nvram_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
					    "flash_nvram_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (!(ha->flags & FUNCTION_1)) {
				ha->flash_nvram_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_VPD_1_REGION:
			if (vpd_rg) {
				if (vpd_rg == FLASH_VPD_1_REGION) {
					ha->flash_vpd_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
					    "flash_vpd_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (ha->flags & FUNCTION_1 &&
			    !(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
				    "\n", ha->instance, faddr);
			}
			break;
		case FLASH_NVRAM_1_REGION:
			/* Map-selected NVRAM region 1 implies function 1. */
			if (nv_rg) {
				if (nv_rg == FLASH_NVRAM_1_REGION) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags |= FUNCTION_1;
					ADAPTER_STATE_UNLOCK(ha);
					ha->flash_nvram_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
					    "flash_nvram_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (ha->flags & FUNCTION_1) {
				ha->flash_nvram_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_DESC_TABLE_REGION:
			if (!(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_desc_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_ERROR_LOG_0_REGION:
			if (!(ha->flags & FUNCTION_1)) {
				ha->flash_errlog_start = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_ERROR_LOG_1_REGION:
			if (ha->flags & FUNCTION_1) {
				ha->flash_errlog_start = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		default:
			/* Regions we do not track are ignored. */
			break;
		}
	}
	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7247 
7248 /*
7249  * ql_flash_nvram_defaults
7250  *	Flash default addresses.
7251  *
7252  * Input:
7253  *	ha:		adapter state pointer.
7254  *
7255  * Returns:
7256  *	ql local function return status code.
7257  *
7258  * Context:
7259  *	Kernel context.
7260  */
7261 static void
7262 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
7263 {
7264 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7265 
7266 	if (ha->flags & FUNCTION_1) {
7267 		if (CFG_IST(ha, CFG_CTRL_2300)) {
7268 			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
7269 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7270 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
7271 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7272 			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
7273 			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
7274 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
7275 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7276 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7277 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7278 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7279 			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
7280 			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
7281 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
7282 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7283 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7284 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7285 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7286 			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
7287 			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
7288 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
7289 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7290 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7291 		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
7292 			ha->flash_data_addr = 0;
7293 			ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
7294 			ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
7295 			ha->flash_errlog_start = 0;
7296 			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7297 			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7298 			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7299 			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7300 			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7301 		}
7302 	} else {
7303 		if (CFG_IST(ha, CFG_CTRL_2200)) {
7304 			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
7305 			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
7306 		} else if (CFG_IST(ha, CFG_CTRL_2300) ||
7307 		    (CFG_IST(ha, CFG_CTRL_6322))) {
7308 			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
7309 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7310 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
7311 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7312 			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
7313 			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
7314 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
7315 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7316 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7317 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7318 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7319 			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
7320 			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7321 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7322 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7323 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7324 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7325 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7326 			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7327 			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7328 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7329 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7330 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7331 		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
7332 			ha->flash_data_addr = 0;
7333 			ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
7334 			ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
7335 			ha->flash_errlog_start = 0;
7336 			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7337 			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7338 			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7339 			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7340 			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7341 		} else {
7342 			EL(ha, "unassigned flash fn0 addr: %x\n",
7343 			    ha->device_id);
7344 		}
7345 	}
7346 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7347 }
7348 
7349 /*
7350  * ql_get_sfp
7351  *	Returns sfp data to sdmapi caller
7352  *
7353  * Input:
7354  *	ha:	adapter state pointer.
7355  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7356  *	mode:	flags.
7357  *
7358  * Returns:
7359  *	None, request status indicated in cmd->Status.
7360  *
7361  * Context:
7362  *	Kernel context.
7363  */
7364 static void
7365 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7366 {
7367 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7368 
7369 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
7370 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7371 		EL(ha, "failed, invalid request for HBA\n");
7372 		return;
7373 	}
7374 
7375 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7376 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7377 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7378 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7379 		    cmd->ResponseLen);
7380 		return;
7381 	}
7382 
7383 	/* Dump SFP data in user buffer */
7384 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7385 	    mode)) != 0) {
7386 		cmd->Status = EXT_STATUS_COPY_ERR;
7387 		EL(ha, "failed, copy error\n");
7388 	} else {
7389 		cmd->Status = EXT_STATUS_OK;
7390 	}
7391 
7392 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7393 }
7394 
7395 /*
7396  * ql_dump_sfp
7397  *	Dumps SFP.
7398  *
7399  * Input:
7400  *	ha:	adapter state pointer.
7401  *	bp:	buffer address.
7402  *	mode:	flags
7403  *
7404  * Returns:
7405  *
7406  * Context:
7407  *	Kernel context.
7408  */
7409 static int
7410 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7411 {
7412 	dma_mem_t	mem;
7413 	uint32_t	cnt;
7414 	int		rval2, rval = 0;
7415 	uint32_t	dxfer;
7416 
7417 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7418 
7419 	/* Get memory for SFP. */
7420 
7421 	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7422 	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7423 		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7424 		return (ENOMEM);
7425 	}
7426 
7427 	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7428 		rval2 = ql_read_sfp(ha, &mem,
7429 		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7430 		    (uint16_t)(cnt & 0xff));
7431 		if (rval2 != QL_SUCCESS) {
7432 			EL(ha, "failed, read_sfp=%xh\n", rval2);
7433 			rval = EFAULT;
7434 			break;
7435 		}
7436 
7437 		/* copy the data back */
7438 		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7439 		    mode)) != mem.size) {
7440 			/* ddi copy error */
7441 			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7442 			rval = EFAULT;
7443 			break;
7444 		}
7445 
7446 		/* adjust the buffer pointer */
7447 		bp = (caddr_t)bp + mem.size;
7448 	}
7449 
7450 	ql_free_phys(ha, &mem);
7451 
7452 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7453 
7454 	return (rval);
7455 }
7456 
7457 /*
7458  * ql_port_param
7459  *	Retrieves or sets the firmware port speed settings
7460  *
7461  * Input:
7462  *	ha:	adapter state pointer.
7463  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7464  *	mode:	flags.
7465  *
7466  * Returns:
7467  *	None, request status indicated in cmd->Status.
7468  *
7469  * Context:
7470  *	Kernel context.
7471  *
7472  */
7473 static void
7474 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7475 {
7476 	uint8_t			*name;
7477 	ql_tgt_t		*tq;
7478 	EXT_PORT_PARAM		port_param = {0};
7479 	uint32_t		rval = QL_SUCCESS;
7480 	uint32_t		idma_rate;
7481 
7482 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7483 
7484 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7485 		EL(ha, "invalid request for this HBA\n");
7486 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7487 		cmd->ResponseLen = 0;
7488 		return;
7489 	}
7490 
7491 	if (LOOP_NOT_READY(ha)) {
7492 		EL(ha, "failed, loop not ready\n");
7493 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7494 		cmd->ResponseLen = 0;
7495 		return;
7496 	}
7497 
7498 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7499 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7500 		EL(ha, "failed, ddi_copyin\n");
7501 		cmd->Status = EXT_STATUS_COPY_ERR;
7502 		cmd->ResponseLen = 0;
7503 		return;
7504 	}
7505 
7506 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7507 		EL(ha, "Unsupported dest lookup type: %xh\n",
7508 		    port_param.FCScsiAddr.DestType);
7509 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7510 		cmd->ResponseLen = 0;
7511 		return;
7512 	}
7513 
7514 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7515 
7516 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7517 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
7518 	    name[5], name[6], name[7]);
7519 
7520 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7521 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7522 		EL(ha, "failed, fc_port not found\n");
7523 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7524 		cmd->ResponseLen = 0;
7525 		return;
7526 	}
7527 
7528 	cmd->Status = EXT_STATUS_OK;
7529 	cmd->DetailStatus = EXT_STATUS_OK;
7530 
7531 	switch (port_param.Mode) {
7532 	case EXT_IIDMA_MODE_GET:
7533 		/*
7534 		 * Report the firmware's port rate for the wwpn
7535 		 */
7536 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7537 		    port_param.Mode);
7538 
7539 		if (rval != QL_SUCCESS) {
7540 			EL(ha, "iidma get failed: %xh\n", rval);
7541 			cmd->Status = EXT_STATUS_MAILBOX;
7542 			cmd->DetailStatus = rval;
7543 			cmd->ResponseLen = 0;
7544 		} else {
7545 			switch (idma_rate) {
7546 			case IIDMA_RATE_1GB:
7547 				port_param.Speed =
7548 				    EXT_DEF_PORTSPEED_1GBIT;
7549 				break;
7550 			case IIDMA_RATE_2GB:
7551 				port_param.Speed =
7552 				    EXT_DEF_PORTSPEED_2GBIT;
7553 				break;
7554 			case IIDMA_RATE_4GB:
7555 				port_param.Speed =
7556 				    EXT_DEF_PORTSPEED_4GBIT;
7557 				break;
7558 			case IIDMA_RATE_8GB:
7559 				port_param.Speed =
7560 				    EXT_DEF_PORTSPEED_8GBIT;
7561 				break;
7562 			case IIDMA_RATE_10GB:
7563 				port_param.Speed =
7564 				    EXT_DEF_PORTSPEED_10GBIT;
7565 				break;
7566 			default:
7567 				port_param.Speed =
7568 				    EXT_DEF_PORTSPEED_UNKNOWN;
7569 				EL(ha, "failed, Port speed rate=%xh\n",
7570 				    idma_rate);
7571 				break;
7572 			}
7573 
7574 			/* Copy back the data */
7575 			rval = ddi_copyout((void *)&port_param,
7576 			    (void *)(uintptr_t)cmd->ResponseAdr,
7577 			    sizeof (EXT_PORT_PARAM), mode);
7578 
7579 			if (rval != 0) {
7580 				cmd->Status = EXT_STATUS_COPY_ERR;
7581 				cmd->ResponseLen = 0;
7582 				EL(ha, "failed, ddi_copyout\n");
7583 			} else {
7584 				cmd->ResponseLen = (uint32_t)
7585 				    sizeof (EXT_PORT_PARAM);
7586 			}
7587 		}
7588 		break;
7589 
7590 	case EXT_IIDMA_MODE_SET:
7591 		/*
7592 		 * Set the firmware's port rate for the wwpn
7593 		 */
7594 		switch (port_param.Speed) {
7595 		case EXT_DEF_PORTSPEED_1GBIT:
7596 			idma_rate = IIDMA_RATE_1GB;
7597 			break;
7598 		case EXT_DEF_PORTSPEED_2GBIT:
7599 			idma_rate = IIDMA_RATE_2GB;
7600 			break;
7601 		case EXT_DEF_PORTSPEED_4GBIT:
7602 			idma_rate = IIDMA_RATE_4GB;
7603 			break;
7604 		case EXT_DEF_PORTSPEED_8GBIT:
7605 			idma_rate = IIDMA_RATE_8GB;
7606 			break;
7607 		case EXT_DEF_PORTSPEED_10GBIT:
7608 			port_param.Speed = IIDMA_RATE_10GB;
7609 			break;
7610 		default:
7611 			EL(ha, "invalid set iidma rate: %x\n",
7612 			    port_param.Speed);
7613 			cmd->Status = EXT_STATUS_INVALID_PARAM;
7614 			cmd->ResponseLen = 0;
7615 			rval = QL_PARAMETER_ERROR;
7616 			break;
7617 		}
7618 
7619 		if (rval == QL_SUCCESS) {
7620 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7621 			    port_param.Mode);
7622 			if (rval != QL_SUCCESS) {
7623 				EL(ha, "iidma set failed: %xh\n", rval);
7624 				cmd->Status = EXT_STATUS_MAILBOX;
7625 				cmd->DetailStatus = rval;
7626 				cmd->ResponseLen = 0;
7627 			}
7628 		}
7629 		break;
7630 	default:
7631 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7632 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7633 		cmd->ResponseLen = 0;
7634 		cmd->DetailStatus = 0;
7635 		break;
7636 	}
7637 
7638 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7639 }
7640 
7641 /*
7642  * ql_get_fwexttrace
7643  *	Dumps f/w extended trace buffer
7644  *
7645  * Input:
7646  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
7648  *	mode:	flags
7649  *
7650  * Returns:
7651  *
7652  * Context:
7653  *	Kernel context.
7654  */
7655 /* ARGSUSED */
7656 static void
7657 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7658 {
7659 	int	rval;
7660 	caddr_t	payload;
7661 
7662 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7663 
7664 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7665 		EL(ha, "invalid request for this HBA\n");
7666 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7667 		cmd->ResponseLen = 0;
7668 		return;
7669 	}
7670 
7671 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7672 	    (ha->fwexttracebuf.bp == NULL)) {
7673 		EL(ha, "f/w extended trace is not enabled\n");
7674 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7675 		cmd->ResponseLen = 0;
7676 		return;
7677 	}
7678 
7679 	if (cmd->ResponseLen < FWEXTSIZE) {
7680 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7681 		cmd->DetailStatus = FWEXTSIZE;
7682 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7683 		    cmd->ResponseLen, FWEXTSIZE);
7684 		cmd->ResponseLen = 0;
7685 		return;
7686 	}
7687 
7688 	/* Time Stamp */
7689 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7690 	if (rval != QL_SUCCESS) {
7691 		EL(ha, "f/w extended trace insert"
7692 		    "time stamp failed: %xh\n", rval);
7693 		cmd->Status = EXT_STATUS_ERR;
7694 		cmd->ResponseLen = 0;
7695 		return;
7696 	}
7697 
7698 	/* Disable Tracing */
7699 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7700 	if (rval != QL_SUCCESS) {
7701 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7702 		cmd->Status = EXT_STATUS_ERR;
7703 		cmd->ResponseLen = 0;
7704 		return;
7705 	}
7706 
7707 	/* Allocate payload buffer */
7708 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7709 	if (payload == NULL) {
7710 		EL(ha, "failed, kmem_zalloc\n");
7711 		cmd->Status = EXT_STATUS_NO_MEMORY;
7712 		cmd->ResponseLen = 0;
7713 		return;
7714 	}
7715 
7716 	/* Sync DMA buffer. */
7717 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7718 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7719 
7720 	/* Copy trace buffer data. */
7721 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7722 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7723 	    DDI_DEV_AUTOINCR);
7724 
7725 	/* Send payload to application. */
7726 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7727 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7728 		EL(ha, "failed, send_buffer_data\n");
7729 		cmd->Status = EXT_STATUS_COPY_ERR;
7730 		cmd->ResponseLen = 0;
7731 	} else {
7732 		cmd->Status = EXT_STATUS_OK;
7733 	}
7734 
7735 	kmem_free(payload, FWEXTSIZE);
7736 
7737 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7738 }
7739 
7740 /*
7741  * ql_get_fwfcetrace
7742  *	Dumps f/w fibre channel event trace buffer
7743  *
7744  * Input:
7745  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
7747  *	mode:	flags
7748  *
7749  * Returns:
7750  *
7751  * Context:
7752  *	Kernel context.
7753  */
7754 /* ARGSUSED */
7755 static void
7756 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7757 {
7758 	int	rval;
7759 	caddr_t	payload;
7760 
7761 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7762 
7763 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7764 		EL(ha, "invalid request for this HBA\n");
7765 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7766 		cmd->ResponseLen = 0;
7767 		return;
7768 	}
7769 
7770 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7771 	    (ha->fwfcetracebuf.bp == NULL)) {
7772 		EL(ha, "f/w FCE trace is not enabled\n");
7773 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7774 		cmd->ResponseLen = 0;
7775 		return;
7776 	}
7777 
7778 	if (cmd->ResponseLen < FWFCESIZE) {
7779 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7780 		cmd->DetailStatus = FWFCESIZE;
7781 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7782 		    cmd->ResponseLen, FWFCESIZE);
7783 		cmd->ResponseLen = 0;
7784 		return;
7785 	}
7786 
7787 	/* Disable Tracing */
7788 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7789 	if (rval != QL_SUCCESS) {
7790 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7791 		cmd->Status = EXT_STATUS_ERR;
7792 		cmd->ResponseLen = 0;
7793 		return;
7794 	}
7795 
7796 	/* Allocate payload buffer */
7797 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7798 	if (payload == NULL) {
7799 		EL(ha, "failed, kmem_zalloc\n");
7800 		cmd->Status = EXT_STATUS_NO_MEMORY;
7801 		cmd->ResponseLen = 0;
7802 		return;
7803 	}
7804 
7805 	/* Sync DMA buffer. */
7806 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7807 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7808 
7809 	/* Copy trace buffer data. */
7810 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7811 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7812 	    DDI_DEV_AUTOINCR);
7813 
7814 	/* Send payload to application. */
7815 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7816 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7817 		EL(ha, "failed, send_buffer_data\n");
7818 		cmd->Status = EXT_STATUS_COPY_ERR;
7819 		cmd->ResponseLen = 0;
7820 	} else {
7821 		cmd->Status = EXT_STATUS_OK;
7822 	}
7823 
7824 	kmem_free(payload, FWFCESIZE);
7825 
7826 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7827 }
7828 
7829 /*
7830  * ql_get_pci_data
7831  *	Retrieves pci config space data
7832  *
7833  * Input:
7834  *	ha:	adapter state pointer.
7835  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7836  *	mode:	flags.
7837  *
7838  * Returns:
7839  *	None, request status indicated in cmd->Status.
7840  *
7841  * Context:
7842  *	Kernel context.
7843  *
7844  */
7845 static void
7846 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7847 {
7848 	uint8_t		cap_ptr;
7849 	uint8_t		cap_id;
7850 	uint32_t	buf_size = 256;
7851 
7852 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7853 
7854 	/*
7855 	 * First check the "Capabilities List" bit of the status register.
7856 	 */
7857 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7858 		/*
7859 		 * Now get the capability pointer
7860 		 */
7861 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7862 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7863 			/*
7864 			 * Check for the pcie capability.
7865 			 */
7866 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7867 			if (cap_id == PCI_CAP_ID_PCI_E) {
7868 				buf_size = 4096;
7869 				break;
7870 			}
7871 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7872 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7873 		}
7874 	}
7875 
7876 	if (cmd->ResponseLen < buf_size) {
7877 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7878 		cmd->DetailStatus = buf_size;
7879 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7880 		    cmd->ResponseLen);
7881 		return;
7882 	}
7883 
7884 	/* Dump PCI config data. */
7885 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7886 	    buf_size, mode)) != 0) {
7887 		cmd->Status = EXT_STATUS_COPY_ERR;
7888 		cmd->DetailStatus = 0;
7889 		EL(ha, "failed, copy err pci_dump\n");
7890 	} else {
7891 		cmd->Status = EXT_STATUS_OK;
7892 		cmd->DetailStatus = buf_size;
7893 	}
7894 
7895 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7896 }
7897 
7898 /*
7899  * ql_pci_dump
7900  *	Dumps PCI config data to application buffer.
7901  *
7902  * Input:
7903  *	ha = adapter state pointer.
7904  *	bp = user buffer address.
7905  *
7906  * Returns:
7907  *
7908  * Context:
7909  *	Kernel context.
7910  */
7911 int
7912 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7913 {
7914 	uint32_t	pci_os;
7915 	uint32_t	*ptr32, *org_ptr32;
7916 
7917 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7918 
7919 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7920 	if (ptr32 == NULL) {
7921 		EL(ha, "failed kmem_zalloc\n");
7922 		return (ENOMEM);
7923 	}
7924 
7925 	/* store the initial value of ptr32 */
7926 	org_ptr32 = ptr32;
7927 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7928 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7929 		LITTLE_ENDIAN_32(ptr32);
7930 		ptr32++;
7931 	}
7932 
7933 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7934 	    0) {
7935 		EL(ha, "failed ddi_copyout\n");
7936 		kmem_free(org_ptr32, pci_size);
7937 		return (EFAULT);
7938 	}
7939 
7940 	QL_DUMP_9(org_ptr32, 8, pci_size);
7941 
7942 	kmem_free(org_ptr32, pci_size);
7943 
7944 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7945 
7946 	return (0);
7947 }
7948 
7949 /*
7950  * ql_menlo_reset
7951  *	Reset Menlo
7952  *
7953  * Input:
7954  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
7956  *	mode:	flags
7957  *
7958  * Returns:
7959  *
7960  * Context:
7961  *	Kernel context.
7962  */
7963 static void
7964 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7965 {
7966 	EXT_MENLO_RESET	rst;
7967 	ql_mbx_data_t	mr;
7968 	int		rval;
7969 
7970 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7971 
7972 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7973 		EL(ha, "failed, invalid request for HBA\n");
7974 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7975 		cmd->ResponseLen = 0;
7976 		return;
7977 	}
7978 
7979 	/*
7980 	 * TODO: only vp_index 0 can do this (?)
7981 	 */
7982 
7983 	/*  Verify the size of request structure. */
7984 	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
7985 		/* Return error */
7986 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7987 		    sizeof (EXT_MENLO_RESET));
7988 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7989 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7990 		cmd->ResponseLen = 0;
7991 		return;
7992 	}
7993 
7994 	/* Get reset request. */
7995 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7996 	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
7997 		EL(ha, "failed, ddi_copyin\n");
7998 		cmd->Status = EXT_STATUS_COPY_ERR;
7999 		cmd->ResponseLen = 0;
8000 		return;
8001 	}
8002 
8003 	/* Wait for I/O to stop and daemon to stall. */
8004 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8005 		EL(ha, "ql_stall_driver failed\n");
8006 		ql_restart_hba(ha);
8007 		cmd->Status = EXT_STATUS_BUSY;
8008 		cmd->ResponseLen = 0;
8009 		return;
8010 	}
8011 
8012 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
8013 	if (rval != QL_SUCCESS) {
8014 		EL(ha, "failed, status=%xh\n", rval);
8015 		cmd->Status = EXT_STATUS_MAILBOX;
8016 		cmd->DetailStatus = rval;
8017 		cmd->ResponseLen = 0;
8018 	} else if (mr.mb[1] != 0) {
8019 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
8020 		cmd->Status = EXT_STATUS_ERR;
8021 		cmd->DetailStatus = mr.mb[1];
8022 		cmd->ResponseLen = 0;
8023 	}
8024 
8025 	ql_restart_hba(ha);
8026 
8027 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8028 }
8029 
8030 /*
8031  * ql_menlo_get_fw_version
8032  *	Get Menlo firmware version.
8033  *
8034  * Input:
8035  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
8037  *	mode:	flags
8038  *
8039  * Returns:
8040  *
8041  * Context:
8042  *	Kernel context.
8043  */
8044 static void
8045 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8046 {
8047 	int				rval;
8048 	ql_mbx_iocb_t			*pkt;
8049 	EXT_MENLO_GET_FW_VERSION	ver = {0};
8050 
8051 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8052 
8053 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8054 		EL(ha, "failed, invalid request for HBA\n");
8055 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8056 		cmd->ResponseLen = 0;
8057 		return;
8058 	}
8059 
8060 	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
8061 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8062 		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
8063 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
8064 		    sizeof (EXT_MENLO_GET_FW_VERSION));
8065 		cmd->ResponseLen = 0;
8066 		return;
8067 	}
8068 
8069 	/* Allocate packet. */
8070 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8071 	if (pkt == NULL) {
8072 		EL(ha, "failed, kmem_zalloc\n");
8073 		cmd->Status = EXT_STATUS_NO_MEMORY;
8074 		cmd->ResponseLen = 0;
8075 		return;
8076 	}
8077 
8078 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8079 	pkt->mvfy.entry_count = 1;
8080 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
8081 
8082 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8083 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8084 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8085 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
8086 
8087 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8088 	    pkt->mvfy.options_status != CS_COMPLETE) {
8089 		/* Command error */
8090 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8091 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8092 		    pkt->mvfy.failure_code);
8093 		cmd->Status = EXT_STATUS_ERR;
8094 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8095 		    QL_FUNCTION_FAILED;
8096 		cmd->ResponseLen = 0;
8097 	} else if (ddi_copyout((void *)&ver,
8098 	    (void *)(uintptr_t)cmd->ResponseAdr,
8099 	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
8100 		EL(ha, "failed, ddi_copyout\n");
8101 		cmd->Status = EXT_STATUS_COPY_ERR;
8102 		cmd->ResponseLen = 0;
8103 	} else {
8104 		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
8105 	}
8106 
8107 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8108 
8109 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8110 }
8111 
8112 /*
8113  * ql_menlo_update_fw
8114  *	Get Menlo update firmware.
8115  *
8116  * Input:
8117  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
8119  *	mode:	flags
8120  *
8121  * Returns:
8122  *
8123  * Context:
8124  *	Kernel context.
8125  */
8126 static void
8127 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8128 {
8129 	ql_mbx_iocb_t		*pkt;
8130 	dma_mem_t		*dma_mem;
8131 	EXT_MENLO_UPDATE_FW	fw;
8132 	uint32_t		*ptr32;
8133 	int			rval;
8134 
8135 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8136 
8137 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8138 		EL(ha, "failed, invalid request for HBA\n");
8139 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8140 		cmd->ResponseLen = 0;
8141 		return;
8142 	}
8143 
8144 	/*
8145 	 * TODO: only vp_index 0 can do this (?)
8146 	 */
8147 
8148 	/*  Verify the size of request structure. */
8149 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
8150 		/* Return error */
8151 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8152 		    sizeof (EXT_MENLO_UPDATE_FW));
8153 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8154 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8155 		cmd->ResponseLen = 0;
8156 		return;
8157 	}
8158 
8159 	/* Get update fw request. */
8160 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
8161 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
8162 		EL(ha, "failed, ddi_copyin\n");
8163 		cmd->Status = EXT_STATUS_COPY_ERR;
8164 		cmd->ResponseLen = 0;
8165 		return;
8166 	}
8167 
8168 	/* Wait for I/O to stop and daemon to stall. */
8169 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8170 		EL(ha, "ql_stall_driver failed\n");
8171 		ql_restart_hba(ha);
8172 		cmd->Status = EXT_STATUS_BUSY;
8173 		cmd->ResponseLen = 0;
8174 		return;
8175 	}
8176 
8177 	/* Allocate packet. */
8178 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
8179 	if (dma_mem == NULL) {
8180 		EL(ha, "failed, kmem_zalloc\n");
8181 		cmd->Status = EXT_STATUS_NO_MEMORY;
8182 		cmd->ResponseLen = 0;
8183 		return;
8184 	}
8185 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8186 	if (pkt == NULL) {
8187 		EL(ha, "failed, kmem_zalloc\n");
8188 		kmem_free(dma_mem, sizeof (dma_mem_t));
8189 		ql_restart_hba(ha);
8190 		cmd->Status = EXT_STATUS_NO_MEMORY;
8191 		cmd->ResponseLen = 0;
8192 		return;
8193 	}
8194 
8195 	/* Get DMA memory for the IOCB */
8196 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
8197 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8198 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8199 		    "alloc failed", QL_NAME, ha->instance);
8200 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8201 		kmem_free(dma_mem, sizeof (dma_mem_t));
8202 		ql_restart_hba(ha);
8203 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8204 		cmd->ResponseLen = 0;
8205 		return;
8206 	}
8207 
8208 	/* Get firmware data. */
8209 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
8210 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
8211 		EL(ha, "failed, get_buffer_data\n");
8212 		ql_free_dma_resource(ha, dma_mem);
8213 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8214 		kmem_free(dma_mem, sizeof (dma_mem_t));
8215 		ql_restart_hba(ha);
8216 		cmd->Status = EXT_STATUS_COPY_ERR;
8217 		cmd->ResponseLen = 0;
8218 		return;
8219 	}
8220 
8221 	/* Sync DMA buffer. */
8222 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8223 	    DDI_DMA_SYNC_FORDEV);
8224 
8225 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8226 	pkt->mvfy.entry_count = 1;
8227 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
8228 	ptr32 = dma_mem->bp;
8229 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
8230 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
8231 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
8232 	pkt->mvfy.dseg_count = LE_16(1);
8233 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
8234 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8235 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
8236 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8237 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
8238 
8239 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8240 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8241 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8242 
8243 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8244 	    pkt->mvfy.options_status != CS_COMPLETE) {
8245 		/* Command error */
8246 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8247 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8248 		    pkt->mvfy.failure_code);
8249 		cmd->Status = EXT_STATUS_ERR;
8250 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8251 		    QL_FUNCTION_FAILED;
8252 		cmd->ResponseLen = 0;
8253 	}
8254 
8255 	ql_free_dma_resource(ha, dma_mem);
8256 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8257 	kmem_free(dma_mem, sizeof (dma_mem_t));
8258 	ql_restart_hba(ha);
8259 
8260 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8261 }
8262 
8263 /*
8264  * ql_menlo_manage_info
8265  *	Get Menlo manage info.
8266  *
8267  * Input:
8268  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
8270  *	mode:	flags
8271  *
8272  * Returns:
8273  *
8274  * Context:
8275  *	Kernel context.
8276  */
8277 static void
8278 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8279 {
8280 	ql_mbx_iocb_t		*pkt;
8281 	dma_mem_t		*dma_mem = NULL;
8282 	EXT_MENLO_MANAGE_INFO	info;
8283 	int			rval;
8284 
8285 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8286 
8287 
8288 	/* The call is only supported for Schultz right now */
8289 	if (CFG_IST(ha, CFG_CTRL_8081)) {
8290 		ql_get_xgmac_statistics(ha, cmd, mode);
8291 		QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n",
8292 		    ha->instance);
8293 		return;
8294 	}
8295 
8296 	if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) {
8297 		EL(ha, "failed, invalid request for HBA\n");
8298 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8299 		cmd->ResponseLen = 0;
8300 		return;
8301 	}
8302 
8303 	/*  Verify the size of request structure. */
8304 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8305 		/* Return error */
8306 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8307 		    sizeof (EXT_MENLO_MANAGE_INFO));
8308 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8309 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8310 		cmd->ResponseLen = 0;
8311 		return;
8312 	}
8313 
8314 	/* Get manage info request. */
8315 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8316 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8317 		EL(ha, "failed, ddi_copyin\n");
8318 		cmd->Status = EXT_STATUS_COPY_ERR;
8319 		cmd->ResponseLen = 0;
8320 		return;
8321 	}
8322 
8323 	/* Allocate packet. */
8324 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8325 	if (pkt == NULL) {
8326 		EL(ha, "failed, kmem_zalloc\n");
8327 		ql_restart_driver(ha);
8328 		cmd->Status = EXT_STATUS_NO_MEMORY;
8329 		cmd->ResponseLen = 0;
8330 		return;
8331 	}
8332 
8333 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
8334 	pkt->mdata.entry_count = 1;
8335 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
8336 
8337 	/* Get DMA memory for the IOCB */
8338 	if (info.Operation == MENLO_OP_READ_MEM ||
8339 	    info.Operation == MENLO_OP_WRITE_MEM) {
8340 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
8341 		pkt->mdata.parameter_1 =
8342 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8343 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8344 		    KM_SLEEP);
8345 		if (dma_mem == NULL) {
8346 			EL(ha, "failed, kmem_zalloc\n");
8347 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8348 			cmd->Status = EXT_STATUS_NO_MEMORY;
8349 			cmd->ResponseLen = 0;
8350 			return;
8351 		}
8352 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8353 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8354 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8355 			    "alloc failed", QL_NAME, ha->instance);
8356 			kmem_free(dma_mem, sizeof (dma_mem_t));
8357 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8358 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8359 			cmd->ResponseLen = 0;
8360 			return;
8361 		}
8362 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8363 			/* Get data. */
8364 			if (ql_get_buffer_data(
8365 			    (caddr_t)(uintptr_t)info.pDataBytes,
8366 			    dma_mem->bp, info.TotalByteCount, mode) !=
8367 			    info.TotalByteCount) {
8368 				EL(ha, "failed, get_buffer_data\n");
8369 				ql_free_dma_resource(ha, dma_mem);
8370 				kmem_free(dma_mem, sizeof (dma_mem_t));
8371 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8372 				cmd->Status = EXT_STATUS_COPY_ERR;
8373 				cmd->ResponseLen = 0;
8374 				return;
8375 			}
8376 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8377 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8378 		}
8379 		pkt->mdata.dseg_count = LE_16(1);
8380 		pkt->mdata.dseg_0_address[0] = (uint32_t)
8381 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8382 		pkt->mdata.dseg_0_address[1] = (uint32_t)
8383 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8384 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8385 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8386 		pkt->mdata.parameter_1 =
8387 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8388 		pkt->mdata.parameter_2 =
8389 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8390 		pkt->mdata.parameter_3 =
8391 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8392 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8393 		pkt->mdata.parameter_1 =
8394 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8395 		pkt->mdata.parameter_2 =
8396 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8397 	}
8398 
8399 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8400 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8401 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8402 
8403 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8404 	    pkt->mdata.options_status != CS_COMPLETE) {
8405 		/* Command error */
8406 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8407 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8408 		    pkt->mdata.failure_code);
8409 		cmd->Status = EXT_STATUS_ERR;
8410 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8411 		    QL_FUNCTION_FAILED;
8412 		cmd->ResponseLen = 0;
8413 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8414 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8415 		    DDI_DMA_SYNC_FORKERNEL);
8416 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8417 		    dma_mem->bp, info.TotalByteCount, mode) !=
8418 		    info.TotalByteCount) {
8419 			cmd->Status = EXT_STATUS_COPY_ERR;
8420 			cmd->ResponseLen = 0;
8421 		}
8422 	}
8423 
8424 	ql_free_dma_resource(ha, dma_mem);
8425 	kmem_free(dma_mem, sizeof (dma_mem_t));
8426 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8427 
8428 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8429 }
8430 
8431 /*
8432  * ql_suspend_hba
8433  *	Suspends all adapter ports.
8434  *
8435  * Input:
8436  *	ha:		adapter state pointer.
8437  *	options:	BIT_0 --> leave driver stalled on exit if
8438  *				  failed.
8439  *
8440  * Returns:
8441  *	ql local function return status code.
8442  *
8443  * Context:
8444  *	Kernel context.
8445  */
8446 static int
8447 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8448 {
8449 	ql_adapter_state_t	*ha2;
8450 	ql_link_t		*link;
8451 	int			rval = QL_SUCCESS;
8452 
8453 	/* Quiesce I/O on all adapter ports */
8454 	for (link = ql_hba.first; link != NULL; link = link->next) {
8455 		ha2 = link->base_address;
8456 
8457 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8458 			continue;
8459 		}
8460 
8461 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8462 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8463 			break;
8464 		}
8465 	}
8466 
8467 	return (rval);
8468 }
8469 
8470 /*
8471  * ql_restart_hba
8472  *	Restarts adapter.
8473  *
8474  * Input:
8475  *	ha:	adapter state pointer.
8476  *
8477  * Context:
8478  *	Kernel context.
8479  */
8480 static void
8481 ql_restart_hba(ql_adapter_state_t *ha)
8482 {
8483 	ql_adapter_state_t	*ha2;
8484 	ql_link_t		*link;
8485 
8486 	/* Resume I/O on all adapter ports */
8487 	for (link = ql_hba.first; link != NULL; link = link->next) {
8488 		ha2 = link->base_address;
8489 
8490 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8491 			continue;
8492 		}
8493 
8494 		ql_restart_driver(ha2);
8495 	}
8496 }
8497 
8498 /*
8499  * ql_get_vp_cnt_id
 *	Retrieves the virtual port count, IDs, paths and driver instances.
8501  *
8502  * Input:
8503  *	ha:	adapter state pointer.
8504  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8505  *	mode:	flags.
8506  *
8507  * Returns:
8508  *	None, request status indicated in cmd->Status.
8509  *
8510  * Context:
8511  *	Kernel context.
8512  *
8513  */
8514 static void
8515 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8516 {
8517 	ql_adapter_state_t	*vha;
8518 	PEXT_VPORT_ID_CNT	ptmp_vp;
8519 	int			id = 0;
8520 	int			rval;
8521 	char			name[MAXPATHLEN];
8522 
8523 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8524 
8525 	/*
8526 	 * To be backward compatible with older API
8527 	 * check for the size of old EXT_VPORT_ID_CNT
8528 	 */
8529 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8530 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8531 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8532 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8533 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8534 		    cmd->ResponseLen);
8535 		cmd->ResponseLen = 0;
8536 		return;
8537 	}
8538 
8539 	ptmp_vp = (EXT_VPORT_ID_CNT *)
8540 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8541 	if (ptmp_vp == NULL) {
8542 		EL(ha, "failed, kmem_zalloc\n");
8543 		cmd->ResponseLen = 0;
8544 		return;
8545 	}
8546 	vha = ha->vp_next;
8547 	while (vha != NULL) {
8548 		ptmp_vp->VpCnt++;
8549 		ptmp_vp->VpId[id] = vha->vp_index;
8550 		(void) ddi_pathname(vha->dip, name);
8551 		(void) strcpy((char *)ptmp_vp->vp_path[id], name);
8552 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8553 		id++;
8554 		vha = vha->vp_next;
8555 	}
8556 	rval = ddi_copyout((void *)ptmp_vp,
8557 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8558 	    cmd->ResponseLen, mode);
8559 	if (rval != 0) {
8560 		cmd->Status = EXT_STATUS_COPY_ERR;
8561 		cmd->ResponseLen = 0;
8562 		EL(ha, "failed, ddi_copyout\n");
8563 	} else {
8564 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8565 		QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8566 		    ha->instance, ptmp_vp->VpCnt);
8567 	}
8568 
8569 }
8570 
8571 /*
8572  * ql_vp_ioctl
8573  *	Performs all EXT_CC_VPORT_CMD functions.
8574  *
8575  * Input:
8576  *	ha:	adapter state pointer.
8577  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8578  *	mode:	flags.
8579  *
8580  * Returns:
8581  *	None, request status indicated in cmd->Status.
8582  *
8583  * Context:
8584  *	Kernel context.
8585  */
8586 static void
8587 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8588 {
8589 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8590 	    cmd->SubCode);
8591 
8592 	/* case off on command subcode */
8593 	switch (cmd->SubCode) {
8594 	case EXT_VF_SC_VPORT_GETINFO:
8595 		ql_qry_vport(ha, cmd, mode);
8596 		break;
8597 	default:
8598 		/* function not supported. */
8599 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8600 		EL(ha, "failed, Unsupported Subcode=%xh\n",
8601 		    cmd->SubCode);
8602 		break;
8603 	}
8604 
8605 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8606 }
8607 
8608 /*
8609  * ql_qry_vport
8610  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8611  *
8612  * Input:
 *	vha:	virtual adapter state pointer.
8614  *	cmd:	EXT_IOCTL cmd struct pointer.
8615  *	mode:	flags.
8616  *
8617  * Returns:
8618  *	None, request status indicated in cmd->Status.
8619  *
8620  * Context:
8621  *	Kernel context.
8622  */
8623 static void
8624 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8625 {
8626 	ql_adapter_state_t	*tmp_vha;
8627 	EXT_VPORT_INFO		tmp_vport = {0};
8628 	int			max_vport;
8629 
8630 	QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8631 
8632 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8633 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8634 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8635 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8636 		    cmd->ResponseLen);
8637 		cmd->ResponseLen = 0;
8638 		return;
8639 	}
8640 
8641 	/* Fill in the vport information. */
8642 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8643 	    EXT_DEF_WWN_NAME_SIZE);
8644 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8645 	    EXT_DEF_WWN_NAME_SIZE);
8646 	tmp_vport.state = vha->state;
8647 	tmp_vport.id = vha->vp_index;
8648 
8649 	tmp_vha = vha->pha->vp_next;
8650 	while (tmp_vha != NULL) {
8651 		tmp_vport.used++;
8652 		tmp_vha = tmp_vha->vp_next;
8653 	}
8654 
8655 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8656 	    MAX_25_VIRTUAL_PORTS);
8657 	if (max_vport > tmp_vport.used) {
8658 		tmp_vport.free = max_vport - tmp_vport.used;
8659 	}
8660 
8661 	if (ddi_copyout((void *)&tmp_vport,
8662 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8663 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
8664 		cmd->Status = EXT_STATUS_COPY_ERR;
8665 		cmd->ResponseLen = 0;
8666 		EL(vha, "failed, ddi_copyout\n");
8667 	} else {
8668 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8669 		QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8670 	}
8671 }
8672 
8673 /*
8674  * ql_access_flash
8675  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
8676  *
8677  * Input:
 *	ha:	adapter state pointer.
8679  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8680  *	mode:	flags.
8681  *
8682  * Returns:
8683  *	None, request status indicated in cmd->Status.
8684  *
8685  * Context:
8686  *	Kernel context.
8687  */
8688 static void
8689 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8690 {
8691 	int	rval;
8692 
8693 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8694 
8695 	switch (cmd->SubCode) {
8696 	case EXT_SC_FLASH_READ:
8697 		if ((rval = ql_flash_fcode_dump(ha,
8698 		    (void *)(uintptr_t)(cmd->ResponseAdr),
8699 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8700 			cmd->Status = EXT_STATUS_COPY_ERR;
8701 			cmd->ResponseLen = 0;
8702 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
8703 		}
8704 		break;
8705 	case EXT_SC_FLASH_WRITE:
8706 		if ((rval = ql_r_m_w_flash(ha,
8707 		    (void *)(uintptr_t)(cmd->RequestAdr),
8708 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8709 		    QL_SUCCESS) {
8710 			cmd->Status = EXT_STATUS_COPY_ERR;
8711 			cmd->ResponseLen = 0;
8712 			EL(ha, "r_m_w_flash status=%xh\n", rval);
8713 		} else {
8714 			/* Reset caches on all adapter instances. */
8715 			ql_update_flash_caches(ha);
8716 		}
8717 		break;
8718 	default:
8719 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8720 		cmd->Status = EXT_STATUS_ERR;
8721 		cmd->ResponseLen = 0;
8722 		break;
8723 	}
8724 
8725 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8726 }
8727 
8728 /*
8729  * ql_reset_cmd
8730  *	Performs all EXT_CC_RESET_FW_OS functions.
8731  *
8732  * Input:
8733  *	ha:	adapter state pointer.
8734  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8735  *
8736  * Returns:
8737  *	None, request status indicated in cmd->Status.
8738  *
8739  * Context:
8740  *	Kernel context.
8741  */
static void
ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	switch (cmd->SubCode) {
	case EXT_SC_RESET_FC_FW:
		/* Schedule a full ISP firmware reset via the task daemon. */
		EL(ha, "isp_abort_needed\n");
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		break;
	case EXT_SC_RESET_MPI_FW:
		/* MPI firmware restart is only valid on 81XX adapters. */
		if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
			EL(ha, "invalid request for HBA\n");
			cmd->Status = EXT_STATUS_INVALID_REQUEST;
			cmd->ResponseLen = 0;
		} else {
			/* Wait for I/O to stop and daemon to stall. */
			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
				EL(ha, "ql_suspend_hba failed\n");
				cmd->Status = EXT_STATUS_BUSY;
				cmd->ResponseLen = 0;
			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
				cmd->Status = EXT_STATUS_ERR;
				cmd->ResponseLen = 0;
			} else {
				uint8_t	timer;
				/*
				 * While the restart_mpi mailbox cmd may be
				 * done the MPI is not. Wait at least 6 sec. or
				 * exit if the loop comes up.
				 */
				for (timer = 6; timer; timer--) {
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						break;
					}
					/* Delay for 1 second. */
					ql_delay(ha, 1000000);
				}
			}
			/* Resume I/O regardless of how the restart went. */
			ql_restart_hba(ha);
		}
		break;
	default:
		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
8794 
8795 /*
8796  * ql_get_dcbx_parameters
8797  *	Get DCBX parameters.
8798  *
8799  * Input:
8800  *	ha:	adapter state pointer.
8801  *	cmd:	User space CT arguments pointer.
8802  *	mode:	flags.
8803  */
8804 static void
8805 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8806 {
8807 	uint8_t		*tmp_buf;
8808 	int		rval;
8809 
8810 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8811 
8812 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8813 		EL(ha, "invalid request for HBA\n");
8814 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8815 		cmd->ResponseLen = 0;
8816 		return;
8817 	}
8818 
8819 	/* Allocate memory for command. */
8820 	tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
8821 	if (tmp_buf == NULL) {
8822 		EL(ha, "failed, kmem_zalloc\n");
8823 		cmd->Status = EXT_STATUS_NO_MEMORY;
8824 		cmd->ResponseLen = 0;
8825 		return;
8826 	}
8827 	/* Send command */
8828 	rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
8829 	    (caddr_t)tmp_buf);
8830 	if (rval != QL_SUCCESS) {
8831 		/* error */
8832 		EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
8833 		kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8834 		cmd->Status = EXT_STATUS_ERR;
8835 		cmd->ResponseLen = 0;
8836 		return;
8837 	}
8838 
8839 	/* Copy the response */
8840 	if (ql_send_buffer_data((caddr_t)tmp_buf,
8841 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
8842 	    EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8843 		EL(ha, "failed, ddi_copyout\n");
8844 		cmd->Status = EXT_STATUS_COPY_ERR;
8845 		cmd->ResponseLen = 0;
8846 	} else {
8847 		cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8848 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8849 	}
8850 	kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8851 
8852 }
8853 
8854 /*
8855  * ql_qry_cna_port
8856  *	Performs EXT_SC_QUERY_CNA_PORT subfunction.
8857  *
8858  * Input:
8859  *	ha:	adapter state pointer.
8860  *	cmd:	EXT_IOCTL cmd struct pointer.
8861  *	mode:	flags.
8862  *
8863  * Returns:
8864  *	None, request status indicated in cmd->Status.
8865  *
8866  * Context:
8867  *	Kernel context.
8868  */
8869 static void
8870 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8871 {
8872 	EXT_CNA_PORT	cna_port = {0};
8873 
8874 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8875 
8876 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8877 		EL(ha, "invalid request for HBA\n");
8878 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8879 		cmd->ResponseLen = 0;
8880 		return;
8881 	}
8882 
8883 	if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
8884 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8885 		cmd->DetailStatus = sizeof (EXT_CNA_PORT);
8886 		EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
8887 		    cmd->ResponseLen);
8888 		cmd->ResponseLen = 0;
8889 		return;
8890 	}
8891 
8892 	cna_port.VLanId = ha->fcoe_vlan_id;
8893 	cna_port.FabricParam = ha->fabric_params;
8894 	bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
8895 	    EXT_DEF_MAC_ADDRESS_SIZE);
8896 
8897 	if (ddi_copyout((void *)&cna_port,
8898 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8899 	    sizeof (EXT_CNA_PORT), mode) != 0) {
8900 		cmd->Status = EXT_STATUS_COPY_ERR;
8901 		cmd->ResponseLen = 0;
8902 		EL(ha, "failed, ddi_copyout\n");
8903 	} else {
8904 		cmd->ResponseLen = sizeof (EXT_CNA_PORT);
8905 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8906 	}
8907 }
8908 
8909 /*
8910  * ql_qry_adapter_versions
8911  *	Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
8912  *
8913  * Input:
8914  *	ha:	adapter state pointer.
8915  *	cmd:	EXT_IOCTL cmd struct pointer.
8916  *	mode:	flags.
8917  *
8918  * Returns:
8919  *	None, request status indicated in cmd->Status.
8920  *
8921  * Context:
8922  *	Kernel context.
8923  */
8924 static void
8925 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
8926     int mode)
8927 {
8928 	uint8_t				is_8142, mpi_cap;
8929 	uint32_t			ver_len, transfer_size;
8930 	PEXT_ADAPTERREGIONVERSION	padapter_ver = NULL;
8931 
8932 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8933 
8934 	/* 8142s do not have a EDC PHY firmware. */
8935 	mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);
8936 
8937 	is_8142 = 0;
8938 	/* Sizeof (Length + Reserved) = 8 Bytes */
8939 	if (mpi_cap == 0x02 || mpi_cap == 0x04) {
8940 		ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
8941 		    + 8;
8942 		is_8142 = 1;
8943 	} else {
8944 		ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
8945 	}
8946 
8947 	/* Allocate local memory for EXT_ADAPTERREGIONVERSION */
8948 	padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
8949 	    KM_SLEEP);
8950 
8951 	if (padapter_ver == NULL) {
8952 		EL(ha, "failed, kmem_zalloc\n");
8953 		cmd->Status = EXT_STATUS_NO_MEMORY;
8954 		cmd->ResponseLen = 0;
8955 		return;
8956 	}
8957 
8958 	padapter_ver->Length = 1;
8959 	/* Copy MPI version */
8960 	padapter_ver->RegionVersion[0].Region =
8961 	    EXT_OPT_ROM_REGION_MPI_RISC_FW;
8962 	padapter_ver->RegionVersion[0].Version[0] =
8963 	    ha->mpi_fw_major_version;
8964 	padapter_ver->RegionVersion[0].Version[1] =
8965 	    ha->mpi_fw_minor_version;
8966 	padapter_ver->RegionVersion[0].Version[2] =
8967 	    ha->mpi_fw_subminor_version;
8968 	padapter_ver->RegionVersion[0].VersionLength = 3;
8969 	padapter_ver->RegionVersion[0].Location = RUNNING_VERSION;
8970 
8971 	if (!is_8142) {
8972 		padapter_ver->RegionVersion[1].Region =
8973 		    EXT_OPT_ROM_REGION_EDC_PHY_FW;
8974 		padapter_ver->RegionVersion[1].Version[0] =
8975 		    ha->phy_fw_major_version;
8976 		padapter_ver->RegionVersion[1].Version[1] =
8977 		    ha->phy_fw_minor_version;
8978 		padapter_ver->RegionVersion[1].Version[2] =
8979 		    ha->phy_fw_subminor_version;
8980 		padapter_ver->RegionVersion[1].VersionLength = 3;
8981 		padapter_ver->RegionVersion[1].Location = RUNNING_VERSION;
8982 		padapter_ver->Length = NO_OF_VERSIONS;
8983 	}
8984 
8985 	if (cmd->ResponseLen < ver_len) {
8986 		EL(ha, "failed, ResponseLen < ver_len, ",
8987 		    "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
8988 		/* Calculate the No. of valid versions being returned. */
8989 		padapter_ver->Length = (uint32_t)
8990 		    ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
8991 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8992 		cmd->DetailStatus = ver_len;
8993 		transfer_size = cmd->ResponseLen;
8994 	} else {
8995 		transfer_size = ver_len;
8996 	}
8997 
8998 	if (ddi_copyout((void *)padapter_ver,
8999 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9000 	    transfer_size, mode) != 0) {
9001 		cmd->Status = EXT_STATUS_COPY_ERR;
9002 		cmd->ResponseLen = 0;
9003 		EL(ha, "failed, ddi_copyout\n");
9004 	} else {
9005 		cmd->ResponseLen = ver_len;
9006 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9007 	}
9008 
9009 	kmem_free(padapter_ver, ver_len);
9010 }
9011 
9012 /*
9013  * ql_get_xgmac_statistics
9014  *	Get XgMac information
9015  *
9016  * Input:
9017  *	ha:	adapter state pointer.
9018  *	cmd:	EXT_IOCTL cmd struct pointer.
9019  *	mode:	flags.
9020  *
9021  * Returns:
9022  *	None, request status indicated in cmd->Status.
9023  *
9024  * Context:
9025  *	Kernel context.
9026  */
9027 static void
9028 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9029 {
9030 	int			rval;
9031 	uint32_t		size;
9032 	int8_t			*tmp_buf;
9033 	EXT_MENLO_MANAGE_INFO	info;
9034 
9035 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9036 
9037 	/*  Verify the size of request structure. */
9038 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
9039 		/* Return error */
9040 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
9041 		    sizeof (EXT_MENLO_MANAGE_INFO));
9042 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9043 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
9044 		cmd->ResponseLen = 0;
9045 		return;
9046 	}
9047 
9048 	/* Get manage info request. */
9049 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9050 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
9051 		EL(ha, "failed, ddi_copyin\n");
9052 		cmd->Status = EXT_STATUS_COPY_ERR;
9053 		cmd->ResponseLen = 0;
9054 		return;
9055 	}
9056 
9057 	size = info.TotalByteCount;
9058 	if (!size) {
9059 		/* parameter error */
9060 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9061 		cmd->DetailStatus = 0;
9062 		EL(ha, "failed, size=%xh\n", size);
9063 		cmd->ResponseLen = 0;
9064 		return;
9065 	}
9066 
9067 	/* Allocate memory for command. */
9068 	tmp_buf = kmem_zalloc(size, KM_SLEEP);
9069 	if (tmp_buf == NULL) {
9070 		EL(ha, "failed, kmem_zalloc\n");
9071 		cmd->Status = EXT_STATUS_NO_MEMORY;
9072 		cmd->ResponseLen = 0;
9073 		return;
9074 	}
9075 
9076 	if (!(info.Operation & MENLO_OP_GET_INFO)) {
9077 		EL(ha, "Invalid request for 81XX\n");
9078 		kmem_free(tmp_buf, size);
9079 		cmd->Status = EXT_STATUS_ERR;
9080 		cmd->ResponseLen = 0;
9081 		return;
9082 	}
9083 
9084 	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);
9085 
9086 	if (rval != QL_SUCCESS) {
9087 		/* error */
9088 		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
9089 		kmem_free(tmp_buf, size);
9090 		cmd->Status = EXT_STATUS_ERR;
9091 		cmd->ResponseLen = 0;
9092 		return;
9093 	}
9094 
9095 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
9096 	    size, mode) != size) {
9097 		EL(ha, "failed, ddi_copyout\n");
9098 		cmd->Status = EXT_STATUS_COPY_ERR;
9099 		cmd->ResponseLen = 0;
9100 	} else {
9101 		cmd->ResponseLen = info.TotalByteCount;
9102 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9103 	}
9104 	kmem_free(tmp_buf, size);
9105 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9106 }
9107 
9108 /*
9109  * ql_get_fcf_list
9110  *	Get FCF list.
9111  *
9112  * Input:
9113  *	ha:	adapter state pointer.
9114  *	cmd:	User space CT arguments pointer.
9115  *	mode:	flags.
9116  */
9117 static void
9118 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9119 {
9120 	uint8_t			*tmp_buf;
9121 	int			rval;
9122 	EXT_FCF_LIST		fcf_list = {0};
9123 	ql_fcf_list_desc_t	mb_fcf_list = {0};
9124 
9125 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9126 
9127 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
9128 		EL(ha, "invalid request for HBA\n");
9129 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9130 		cmd->ResponseLen = 0;
9131 		return;
9132 	}
9133 	/* Get manage info request. */
9134 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9135 	    (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
9136 		EL(ha, "failed, ddi_copyin\n");
9137 		cmd->Status = EXT_STATUS_COPY_ERR;
9138 		cmd->ResponseLen = 0;
9139 		return;
9140 	}
9141 
9142 	if (!(fcf_list.BufSize)) {
9143 		/* Return error */
9144 		EL(ha, "failed, fcf_list BufSize is=%xh\n",
9145 		    fcf_list.BufSize);
9146 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9147 		cmd->ResponseLen = 0;
9148 		return;
9149 	}
9150 	/* Allocate memory for command. */
9151 	tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP);
9152 	if (tmp_buf == NULL) {
9153 		EL(ha, "failed, kmem_zalloc\n");
9154 		cmd->Status = EXT_STATUS_NO_MEMORY;
9155 		cmd->ResponseLen = 0;
9156 		return;
9157 	}
9158 	/* build the descriptor */
9159 	if (fcf_list.Options) {
9160 		mb_fcf_list.options = FCF_LIST_RETURN_ONE;
9161 	} else {
9162 		mb_fcf_list.options = FCF_LIST_RETURN_ALL;
9163 	}
9164 	mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex;
9165 	mb_fcf_list.buffer_size = fcf_list.BufSize;
9166 
9167 	/* Send command */
9168 	rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
9169 	if (rval != QL_SUCCESS) {
9170 		/* error */
9171 		EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
9172 		kmem_free(tmp_buf, fcf_list.BufSize);
9173 		cmd->Status = EXT_STATUS_ERR;
9174 		cmd->ResponseLen = 0;
9175 		return;
9176 	}
9177 
9178 	/* Copy the response */
9179 	if (ql_send_buffer_data((caddr_t)tmp_buf,
9180 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
9181 	    fcf_list.BufSize, mode) != fcf_list.BufSize) {
9182 		EL(ha, "failed, ddi_copyout\n");
9183 		cmd->Status = EXT_STATUS_COPY_ERR;
9184 		cmd->ResponseLen = 0;
9185 	} else {
9186 		cmd->ResponseLen = mb_fcf_list.buffer_size;
9187 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9188 	}
9189 
9190 	kmem_free(tmp_buf, fcf_list.BufSize);
9191 }
9192 
9193 /*
9194  * ql_get_resource_counts
9195  *	Get Resource counts:
9196  *
9197  * Input:
9198  *	ha:	adapter state pointer.
9199  *	cmd:	User space CT arguments pointer.
9200  *	mode:	flags.
9201  */
9202 static void
9203 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9204 {
9205 	int			rval;
9206 	ql_mbx_data_t		mr;
9207 	EXT_RESOURCE_CNTS	tmp_rc_cnt = {0};
9208 
9209 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9210 
9211 	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
9212 		EL(ha, "invalid request for HBA\n");
9213 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9214 		cmd->ResponseLen = 0;
9215 		return;
9216 	}
9217 
9218 	if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9219 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9220 		cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9221 		EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9222 		    "Len=%xh\n", cmd->ResponseLen);
9223 		cmd->ResponseLen = 0;
9224 		return;
9225 	}
9226 
9227 	rval = ql_get_resource_cnts(ha, &mr);
9228 	if (rval != QL_SUCCESS) {
9229 		EL(ha, "resource cnt mbx failed\n");
9230 		cmd->Status = EXT_STATUS_ERR;
9231 		cmd->ResponseLen = 0;
9232 		return;
9233 	}
9234 
9235 	tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9236 	tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9237 	tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9238 	tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9239 	tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9240 	tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9241 	tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9242 	tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9243 
9244 	rval = ddi_copyout((void *)&tmp_rc_cnt,
9245 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9246 	    sizeof (EXT_RESOURCE_CNTS), mode);
9247 	if (rval != 0) {
9248 		cmd->Status = EXT_STATUS_COPY_ERR;
9249 		cmd->ResponseLen = 0;
9250 		EL(ha, "failed, ddi_copyout\n");
9251 	} else {
9252 		cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9253 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9254 	}
9255 }
9256