1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
91 
92 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
94 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
95 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
96     uint8_t);
97 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
99 static int ql_24xx_flash_desc(ql_adapter_state_t *);
100 static int ql_setup_flash(ql_adapter_state_t *);
101 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
102 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
103 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
104     uint32_t, int);
105 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
106     uint8_t);
107 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
110 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
116 static uint32_t ql_setup_led(ql_adapter_state_t *);
117 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
118 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
122 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
123 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
125 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
126 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
127 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
128 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
129 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
139 static void ql_restart_hba(ql_adapter_state_t *);
140 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
144 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
145 static void ql_update_flash_caches(ql_adapter_state_t *);
146 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
148 
149 /* ******************************************************************** */
150 /*			External IOCTL support.				*/
151 /* ******************************************************************** */
152 
153 /*
154  * ql_alloc_xioctl_resource
155  *	Allocates resources needed by module code.
156  *
157  * Input:
158  *	ha:		adapter state pointer.
159  *
 * Returns:
 *	0:	success
 *	ENOMEM:	memory allocation failed.
162  *
163  * Context:
164  *	Kernel context.
165  */
166 int
167 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
168 {
169 	ql_xioctl_t	*xp;
170 
171 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
172 
173 	if (ha->xioctl != NULL) {
174 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
175 		    ha->instance);
176 		return (0);
177 	}
178 
179 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
180 	if (xp == NULL) {
181 		EL(ha, "failed, kmem_zalloc\n");
182 		return (ENOMEM);
183 	}
184 	ha->xioctl = xp;
185 
186 	/* Allocate AEN tracking buffer */
187 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
188 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
189 	if (xp->aen_tracking_queue == NULL) {
190 		EL(ha, "failed, kmem_zalloc-2\n");
191 		ql_free_xioctl_resource(ha);
192 		return (ENOMEM);
193 	}
194 
195 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
196 
197 	return (0);
198 }
199 
200 /*
201  * ql_free_xioctl_resource
202  *	Frees resources used by module code.
203  *
204  * Input:
205  *	ha:		adapter state pointer.
206  *
207  * Context:
208  *	Kernel context.
209  */
210 void
211 ql_free_xioctl_resource(ql_adapter_state_t *ha)
212 {
213 	ql_xioctl_t	*xp = ha->xioctl;
214 
215 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
216 
217 	if (xp == NULL) {
218 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
219 		return;
220 	}
221 
222 	if (xp->aen_tracking_queue != NULL) {
223 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
224 		    sizeof (EXT_ASYNC_EVENT));
225 		xp->aen_tracking_queue = NULL;
226 	}
227 
228 	kmem_free(xp, sizeof (ql_xioctl_t));
229 	ha->xioctl = NULL;
230 
231 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
232 }
233 
234 /*
235  * ql_xioctl
236  *	External IOCTL processing.
237  *
238  * Input:
239  *	ha:	adapter state pointer.
240  *	cmd:	function to perform
241  *	arg:	data type varies with request
242  *	mode:	flags
243  *	cred_p:	credentials pointer
244  *	rval_p:	pointer to result value
245  *
246  * Returns:
247  *	0:		success
248  *	ENXIO:		No such device or address
249  *	ENOPROTOOPT:	Protocol not available
250  *
251  * Context:
252  *	Kernel context.
253  */
254 /* ARGSUSED */
255 int
256 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
257     cred_t *cred_p, int *rval_p)
258 {
259 	int	rval;
260 
261 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
262 
263 	if (ha->xioctl == NULL) {
264 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
265 		return (ENXIO);
266 	}
267 
268 	switch (cmd) {
269 	case EXT_CC_QUERY:
270 	case EXT_CC_SEND_FCCT_PASSTHRU:
271 	case EXT_CC_REG_AEN:
272 	case EXT_CC_GET_AEN:
273 	case EXT_CC_SEND_SCSI_PASSTHRU:
274 	case EXT_CC_WWPN_TO_SCSIADDR:
275 	case EXT_CC_SEND_ELS_RNID:
276 	case EXT_CC_SET_DATA:
277 	case EXT_CC_GET_DATA:
278 	case EXT_CC_HOST_IDX:
279 	case EXT_CC_READ_NVRAM:
280 	case EXT_CC_UPDATE_NVRAM:
281 	case EXT_CC_READ_OPTION_ROM:
282 	case EXT_CC_READ_OPTION_ROM_EX:
283 	case EXT_CC_UPDATE_OPTION_ROM:
284 	case EXT_CC_UPDATE_OPTION_ROM_EX:
285 	case EXT_CC_GET_VPD:
286 	case EXT_CC_SET_VPD:
287 	case EXT_CC_LOOPBACK:
288 	case EXT_CC_GET_FCACHE:
289 	case EXT_CC_GET_FCACHE_EX:
290 	case EXT_CC_HOST_DRVNAME:
291 	case EXT_CC_GET_SFP_DATA:
292 	case EXT_CC_PORT_PARAM:
293 	case EXT_CC_GET_PCI_DATA:
294 	case EXT_CC_GET_FWEXTTRACE:
295 	case EXT_CC_GET_FWFCETRACE:
296 	case EXT_CC_GET_VP_CNT_ID:
297 	case EXT_CC_VPORT_CMD:
298 	case EXT_CC_ACCESS_FLASH:
299 	case EXT_CC_RESET_FW:
300 	case EXT_CC_MENLO_MANAGE_INFO:
301 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
302 		break;
303 	default:
304 		/* function not supported. */
305 		EL(ha, "function=%d not supported\n", cmd);
306 		rval = ENOPROTOOPT;
307 	}
308 
309 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
310 
311 	return (rval);
312 }
313 
314 /*
315  * ql_sdm_ioctl
316  *	Provides ioctl functions for SAN/Device Management functions
317  *	AKA External Ioctl functions.
318  *
319  * Input:
320  *	ha:		adapter state pointer.
321  *	ioctl_code:	ioctl function to perform
322  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
323  *	mode:		flags
324  *
325  * Returns:
326  *	0:	success
327  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
328  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
329  *		copyout of EXT_IOCTL status info failed.
330  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
331  *	EBUSY:	Device busy
332  *
333  * Context:
334  *	Kernel context.
335  */
336 static int
337 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
338 {
339 	EXT_IOCTL		*cmd;
340 	int			rval;
341 	ql_adapter_state_t	*vha;
342 
343 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
344 
345 	/* Copy argument structure (EXT_IOCTL) from application land. */
346 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
347 	    ql_validate_signature)) != 0) {
348 		/*
349 		 * a non-zero value at this time means a problem getting
350 		 * the requested information from application land, just
351 		 * return the error code and hope for the best.
352 		 */
353 		EL(ha, "failed, sdm_setup\n");
354 		return (rval);
355 	}
356 
357 	/*
358 	 * Map the physical ha ptr (which the ioctl is called with)
359 	 * to the virtual ha that the caller is addressing.
360 	 */
361 	if (ha->flags & VP_ENABLED) {
362 		/*
363 		 * Special case: HbaSelect == 0 is physical ha
364 		 */
365 		if (cmd->HbaSelect != 0) {
366 			vha = ha->vp_next;
367 			while (vha != NULL) {
368 				if (vha->vp_index == cmd->HbaSelect) {
369 					ha = vha;
370 					break;
371 				}
372 				vha = vha->vp_next;
373 			}
374 
375 			/*
376 			 * If we can't find the specified vp index then
377 			 * we probably have an error (vp indexes shifting
378 			 * under our feet?).
379 			 */
380 			if (vha == NULL) {
381 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
382 				    cmd->HbaSelect);
383 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
384 				cmd->ResponseLen = 0;
385 				return (EFAULT);
386 			}
387 		}
388 	}
389 
390 	/*
391 	 * If driver is suspended, stalled, or powered down rtn BUSY
392 	 */
393 	if (ha->flags & ADAPTER_SUSPENDED ||
394 	    ha->task_daemon_flags & DRIVER_STALL ||
395 	    ha->power_level != PM_LEVEL_D0) {
396 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
397 		    "driver suspended" :
398 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
399 		    "FCA powered down"));
400 		cmd->Status = EXT_STATUS_BUSY;
401 		cmd->ResponseLen = 0;
402 		rval = EBUSY;
403 
404 		/* Return results to caller */
405 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
406 			EL(ha, "failed, sdm_return\n");
407 			rval = EFAULT;
408 		}
409 		return (rval);
410 	}
411 
412 	switch (ioctl_code) {
413 	case EXT_CC_QUERY_OS:
414 		ql_query(ha, cmd, mode);
415 		break;
416 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
417 		ql_fcct(ha, cmd, mode);
418 		break;
419 	case EXT_CC_REG_AEN_OS:
420 		ql_aen_reg(ha, cmd, mode);
421 		break;
422 	case EXT_CC_GET_AEN_OS:
423 		ql_aen_get(ha, cmd, mode);
424 		break;
425 	case EXT_CC_GET_DATA_OS:
426 		ql_get_host_data(ha, cmd, mode);
427 		break;
428 	case EXT_CC_SET_DATA_OS:
429 		ql_set_host_data(ha, cmd, mode);
430 		break;
431 	case EXT_CC_SEND_ELS_RNID_OS:
432 		ql_send_els_rnid(ha, cmd, mode);
433 		break;
434 	case EXT_CC_SCSI_PASSTHRU_OS:
435 		ql_scsi_passthru(ha, cmd, mode);
436 		break;
437 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
438 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
439 		break;
440 	case EXT_CC_HOST_IDX_OS:
441 		ql_host_idx(ha, cmd, mode);
442 		break;
443 	case EXT_CC_HOST_DRVNAME_OS:
444 		ql_host_drvname(ha, cmd, mode);
445 		break;
446 	case EXT_CC_READ_NVRAM_OS:
447 		ql_read_nvram(ha, cmd, mode);
448 		break;
449 	case EXT_CC_UPDATE_NVRAM_OS:
450 		ql_write_nvram(ha, cmd, mode);
451 		break;
452 	case EXT_CC_READ_OPTION_ROM_OS:
453 	case EXT_CC_READ_OPTION_ROM_EX_OS:
454 		ql_read_flash(ha, cmd, mode);
455 		break;
456 	case EXT_CC_UPDATE_OPTION_ROM_OS:
457 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
458 		ql_write_flash(ha, cmd, mode);
459 		break;
460 	case EXT_CC_LOOPBACK_OS:
461 		ql_diagnostic_loopback(ha, cmd, mode);
462 		break;
463 	case EXT_CC_GET_VPD_OS:
464 		ql_read_vpd(ha, cmd, mode);
465 		break;
466 	case EXT_CC_SET_VPD_OS:
467 		ql_write_vpd(ha, cmd, mode);
468 		break;
469 	case EXT_CC_GET_FCACHE_OS:
470 		ql_get_fcache(ha, cmd, mode);
471 		break;
472 	case EXT_CC_GET_FCACHE_EX_OS:
473 		ql_get_fcache_ex(ha, cmd, mode);
474 		break;
475 	case EXT_CC_GET_SFP_DATA_OS:
476 		ql_get_sfp(ha, cmd, mode);
477 		break;
478 	case EXT_CC_PORT_PARAM_OS:
479 		ql_port_param(ha, cmd, mode);
480 		break;
481 	case EXT_CC_GET_PCI_DATA_OS:
482 		ql_get_pci_data(ha, cmd, mode);
483 		break;
484 	case EXT_CC_GET_FWEXTTRACE_OS:
485 		ql_get_fwexttrace(ha, cmd, mode);
486 		break;
487 	case EXT_CC_GET_FWFCETRACE_OS:
488 		ql_get_fwfcetrace(ha, cmd, mode);
489 		break;
490 	case EXT_CC_MENLO_RESET:
491 		ql_menlo_reset(ha, cmd, mode);
492 		break;
493 	case EXT_CC_MENLO_GET_FW_VERSION:
494 		ql_menlo_get_fw_version(ha, cmd, mode);
495 		break;
496 	case EXT_CC_MENLO_UPDATE_FW:
497 		ql_menlo_update_fw(ha, cmd, mode);
498 		break;
499 	case EXT_CC_MENLO_MANAGE_INFO:
500 		ql_menlo_manage_info(ha, cmd, mode);
501 		break;
502 	case EXT_CC_GET_VP_CNT_ID_OS:
503 		ql_get_vp_cnt_id(ha, cmd, mode);
504 		break;
505 	case EXT_CC_VPORT_CMD_OS:
506 		ql_vp_ioctl(ha, cmd, mode);
507 		break;
508 	case EXT_CC_ACCESS_FLASH_OS:
509 		ql_access_flash(ha, cmd, mode);
510 		break;
511 	case EXT_CC_RESET_FW_OS:
512 		ql_reset_cmd(ha, cmd);
513 		break;
514 	default:
515 		/* function not supported. */
516 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
517 
518 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
519 		cmd->ResponseLen = 0;
520 		break;
521 	}
522 
523 	/* Return results to caller */
524 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
525 		EL(ha, "failed, sdm_return\n");
526 		return (EFAULT);
527 	}
528 
529 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
530 
531 	return (0);
532 }
533 
534 /*
535  * ql_sdm_setup
536  *	Make a local copy of the EXT_IOCTL struct and validate it.
537  *
538  * Input:
539  *	ha:		adapter state pointer.
540  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
541  *	arg:		Address of application EXT_IOCTL cmd data
542  *	mode:		flags
543  *	val_sig:	Pointer to a function to validate the ioctl signature.
544  *
545  * Returns:
546  *	0:		success
547  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
548  *	EINVAL:		Invalid version, signature.
549  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
550  *
551  * Context:
552  *	Kernel context.
553  */
554 static int
555 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
556     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
557 {
558 	int		rval;
559 	EXT_IOCTL	*cmd;
560 
561 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
562 
563 	/* Allocate local memory for EXT_IOCTL. */
564 	*cmd_struct = NULL;
565 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
566 	if (cmd == NULL) {
567 		EL(ha, "failed, kmem_zalloc\n");
568 		return (ENOMEM);
569 	}
570 	/* Get argument structure. */
571 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
572 	if (rval != 0) {
573 		EL(ha, "failed, ddi_copyin\n");
574 		rval = EFAULT;
575 	} else {
576 		/*
577 		 * Check signature and the version.
578 		 * If either are not valid then neither is the
579 		 * structure so don't attempt to return any error status
580 		 * because we can't trust what caller's arg points to.
581 		 * Just return the errno.
582 		 */
583 		if (val_sig(cmd) == 0) {
584 			EL(ha, "failed, signature\n");
585 			rval = EINVAL;
586 		} else if (cmd->Version > EXT_VERSION) {
587 			EL(ha, "failed, version\n");
588 			rval = EINVAL;
589 		}
590 	}
591 
592 	if (rval == 0) {
593 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
594 		*cmd_struct = cmd;
595 		cmd->Status = EXT_STATUS_OK;
596 		cmd->DetailStatus = 0;
597 	} else {
598 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
599 	}
600 
601 	return (rval);
602 }
603 
604 /*
605  * ql_validate_signature
606  *	Validate the signature string for an external ioctl call.
607  *
608  * Input:
 *	cmd_struct:	Pointer to the EXT_IOCTL struct to validate.
610  *
611  * Returns:
612  *	B_TRUE:		Signature is valid.
613  *	B_FALSE:	Signature is NOT valid.
614  *
615  * Context:
616  *	Kernel context.
617  */
618 static boolean_t
619 ql_validate_signature(EXT_IOCTL *cmd_struct)
620 {
621 	/*
622 	 * Check signature.
623 	 *
624 	 * If signature is not valid then neither is the rest of
625 	 * the structure (e.g., can't trust it), so don't attempt
626 	 * to return any error status other than the errno.
627 	 */
628 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
629 		QL_PRINT_2(CE_CONT, "failed,\n");
630 		return (B_FALSE);
631 	}
632 
633 	return (B_TRUE);
634 }
635 
636 /*
637  * ql_sdm_return
638  *	Copies return data/status to application land for
639  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
640  *
641  * Input:
642  *	ha:		adapter state pointer.
643  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
645  *	arg:		EXT_IOCTL cmd data in application land.
646  *	mode:		flags
647  *
648  * Returns:
649  *	0:	success
650  *	EFAULT:	Copy out error.
651  *
652  * Context:
653  *	Kernel context.
654  */
655 /* ARGSUSED */
656 static int
657 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
658 {
659 	int	rval = 0;
660 
661 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
662 
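	/*
	 * Only the EXT_IOCTL status fields are copied back here; any
	 * response payload was already copied out to cmd->ResponseAdr
	 * by the individual command handler.
	 */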
663 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
664 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
665 	    mode);
666 
667 	rval |= ddi_copyout((void *)&cmd->Status,
668 	    (void *)&(((EXT_IOCTL*)arg)->Status),
669 	    sizeof (cmd->Status), mode);
670 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
671 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
672 	    sizeof (cmd->DetailStatus), mode);
673 
674 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
675 
676 	if (rval != 0) {
677 		/* Some copyout operation failed */
678 		EL(ha, "failed, ddi_copyout\n");
679 		return (EFAULT);
680 	}
681 
682 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
683 
684 	return (0);
685 }
686 
687 /*
688  * ql_query
689  *	Performs all EXT_CC_QUERY functions.
690  *
691  * Input:
692  *	ha:	adapter state pointer.
693  *	cmd:	Local EXT_IOCTL cmd struct pointer.
694  *	mode:	flags.
695  *
696  * Returns:
697  *	None, request status indicated in cmd->Status.
698  *
699  * Context:
700  *	Kernel context.
701  */
702 static void
703 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
704 {
	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
	    cmd->SubCode);
707 
708 	/* case off on command subcode */
709 	switch (cmd->SubCode) {
710 	case EXT_SC_QUERY_HBA_NODE:
711 		ql_qry_hba_node(ha, cmd, mode);
712 		break;
713 	case EXT_SC_QUERY_HBA_PORT:
714 		ql_qry_hba_port(ha, cmd, mode);
715 		break;
716 	case EXT_SC_QUERY_DISC_PORT:
717 		ql_qry_disc_port(ha, cmd, mode);
718 		break;
719 	case EXT_SC_QUERY_DISC_TGT:
720 		ql_qry_disc_tgt(ha, cmd, mode);
721 		break;
722 	case EXT_SC_QUERY_DRIVER:
723 		ql_qry_driver(ha, cmd, mode);
724 		break;
725 	case EXT_SC_QUERY_FW:
726 		ql_qry_fw(ha, cmd, mode);
727 		break;
728 	case EXT_SC_QUERY_CHIP:
729 		ql_qry_chip(ha, cmd, mode);
730 		break;
731 	case EXT_SC_QUERY_CNA_PORT:
732 		ql_qry_cna_port(ha, cmd, mode);
733 		break;
734 	case EXT_SC_QUERY_DISC_LUN:
735 	default:
736 		/* function not supported. */
737 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
738 		EL(ha, "failed, Unsupported Subcode=%xh\n",
739 		    cmd->SubCode);
740 		break;
741 	}
742 
743 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
744 }
745 
746 /*
747  * ql_qry_hba_node
748  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
749  *
750  * Input:
751  *	ha:	adapter state pointer.
752  *	cmd:	EXT_IOCTL cmd struct pointer.
753  *	mode:	flags.
754  *
755  * Returns:
756  *	None, request status indicated in cmd->Status.
757  *
758  * Context:
759  *	Kernel context.
760  */
761 static void
762 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
763 {
764 	EXT_HBA_NODE	tmp_node = {0};
765 	uint_t		len;
766 	caddr_t		bufp;
767 	ql_mbx_data_t	mr;
768 
769 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
770 
771 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
772 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
773 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
774 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
775 		    "Len=%xh\n", cmd->ResponseLen);
776 		cmd->ResponseLen = 0;
777 		return;
778 	}
779 
780 	/* fill in the values */
781 
782 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
783 	    EXT_DEF_WWN_NAME_SIZE);
784 
785 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
786 
787 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
788 
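	/* Derive the serial number from the low 3 bytes of the WWNN. */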
789 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
790 
791 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
792 
793 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
794 		size_t		verlen;
795 		uint16_t	w;
796 		char		*tmpptr;
797 
798 		verlen = strlen((char *)(tmp_node.DriverVersion));
799 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
800 			EL(ha, "failed, No room for fpga version string\n");
801 		} else {
802 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
803 			    (uint16_t *)
804 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
805 
806 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
807 			if (tmpptr == NULL) {
808 				EL(ha, "Unable to insert fpga version str\n");
809 			} else {
810 				(void) sprintf(tmpptr, "%d.%d",
811 				    ((w & 0xf0) >> 4), (w & 0x0f));
812 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
813 			}
814 		}
815 	}
816 	(void) ql_get_fw_version(ha, &mr);
817 
818 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
819 	    mr.mb[1], mr.mb[2], mr.mb[3]);
820 
821 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
822 		switch (mr.mb[6]) {
823 		case FWATTRIB_EF:
824 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
825 			break;
826 		case FWATTRIB_TP:
827 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
828 			break;
829 		case FWATTRIB_IP:
830 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
831 			break;
832 		case FWATTRIB_IPX:
833 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
834 			break;
835 		case FWATTRIB_FL:
836 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
837 			break;
838 		case FWATTRIB_FPX:
839 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
840 			break;
841 		default:
842 			break;
843 		}
844 	}
845 
846 	/* FCode version. */
847 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
848 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
849 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
850 	    (int *)&len) == DDI_PROP_SUCCESS) {
851 		if (len < EXT_DEF_MAX_STR_SIZE) {
852 			bcopy(bufp, tmp_node.OptRomVersion, len);
853 		} else {
854 			bcopy(bufp, tmp_node.OptRomVersion,
855 			    EXT_DEF_MAX_STR_SIZE - 1);
856 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
857 			    '\0';
858 		}
859 		kmem_free(bufp, len);
860 	} else {
861 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
862 	}
863 	tmp_node.PortCount = 1;
864 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
865 
866 	if (ddi_copyout((void *)&tmp_node,
867 	    (void *)(uintptr_t)(cmd->ResponseAdr),
868 	    sizeof (EXT_HBA_NODE), mode) != 0) {
869 		cmd->Status = EXT_STATUS_COPY_ERR;
870 		cmd->ResponseLen = 0;
871 		EL(ha, "failed, ddi_copyout\n");
872 	} else {
873 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
874 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
875 	}
876 }
877 
878 /*
879  * ql_qry_hba_port
880  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
881  *
882  * Input:
883  *	ha:	adapter state pointer.
884  *	cmd:	EXT_IOCTL cmd struct pointer.
885  *	mode:	flags.
886  *
887  * Returns:
888  *	None, request status indicated in cmd->Status.
889  *
890  * Context:
891  *	Kernel context.
892  */
893 static void
894 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
895 {
896 	ql_link_t	*link;
897 	ql_tgt_t	*tq;
898 	ql_mbx_data_t	mr;
899 	EXT_HBA_PORT	tmp_port = {0};
900 	int		rval;
901 	uint16_t	port_cnt, tgt_cnt, index;
902 
903 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
904 
905 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
906 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
907 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
		EL(ha, "failed, ResponseLen < EXT_HBA_PORT, Len=%xh\n",
		    cmd->ResponseLen);
910 		cmd->ResponseLen = 0;
911 		return;
912 	}
913 
914 	/* fill in the values */
915 
916 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
917 	    EXT_DEF_WWN_NAME_SIZE);
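	/* 24-bit port ID: domain, area, al_pa; byte 0 is unused. */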
918 	tmp_port.Id[0] = 0;
919 	tmp_port.Id[1] = ha->d_id.b.domain;
920 	tmp_port.Id[2] = ha->d_id.b.area;
921 	tmp_port.Id[3] = ha->d_id.b.al_pa;
922 
923 	/* For now we are initiator only driver */
924 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
925 
926 	if (ha->task_daemon_flags & LOOP_DOWN) {
927 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
928 	} else if (DRIVER_SUSPENDED(ha)) {
929 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
930 	} else {
931 		tmp_port.State = EXT_DEF_HBA_OK;
932 	}
933 
934 	if (ha->flags & POINT_TO_POINT) {
935 		tmp_port.Mode = EXT_DEF_P2P_MODE;
936 	} else {
937 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
938 	}
939 	/*
940 	 * fill in the portspeed values.
941 	 *
942 	 * default to not yet negotiated state
943 	 */
944 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
945 
946 	if (tmp_port.State == EXT_DEF_HBA_OK) {
947 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
948 			mr.mb[1] = 0;
949 			mr.mb[2] = 0;
950 			rval = ql_data_rate(ha, &mr);
951 			if (rval != QL_SUCCESS) {
952 				EL(ha, "failed, data_rate=%xh\n", rval);
953 			} else {
954 				switch (mr.mb[1]) {
955 				case IIDMA_RATE_1GB:
956 					tmp_port.PortSpeed =
957 					    EXT_DEF_PORTSPEED_1GBIT;
958 					break;
959 				case IIDMA_RATE_2GB:
960 					tmp_port.PortSpeed =
961 					    EXT_DEF_PORTSPEED_2GBIT;
962 					break;
963 				case IIDMA_RATE_4GB:
964 					tmp_port.PortSpeed =
965 					    EXT_DEF_PORTSPEED_4GBIT;
966 					break;
967 				case IIDMA_RATE_8GB:
968 					tmp_port.PortSpeed =
969 					    EXT_DEF_PORTSPEED_8GBIT;
970 					break;
971 				case IIDMA_RATE_10GB:
972 					tmp_port.PortSpeed =
973 					    EXT_DEF_PORTSPEED_10GBIT;
974 					break;
975 				default:
976 					tmp_port.PortSpeed =
977 					    EXT_DEF_PORTSPEED_UNKNOWN;
978 					EL(ha, "failed, data rate=%xh\n",
979 					    mr.mb[1]);
980 					break;
981 				}
982 			}
983 		} else {
984 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
985 		}
986 	}
987 
988 	/* Report all supported port speeds */
989 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
990 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
991 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
992 		    EXT_DEF_PORTSPEED_1GBIT);
993 		/*
994 		 * Correct supported speeds based on type of
995 		 * sfp that is present
996 		 */
997 		switch (ha->sfp_stat) {
998 		case 1:
999 			/* no sfp detected */
1000 			break;
1001 		case 2:
1002 		case 4:
1003 			/* 4GB sfp */
1004 			tmp_port.PortSupportedSpeed &=
1005 			    ~EXT_DEF_PORTSPEED_8GBIT;
1006 			break;
1007 		case 3:
1008 		case 5:
1009 			/* 8GB sfp */
1010 			tmp_port.PortSupportedSpeed &=
1011 			    ~EXT_DEF_PORTSPEED_1GBIT;
1012 			break;
1013 		default:
1014 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1015 			break;
1017 		}
1018 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
1019 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1020 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1021 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1022 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1023 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1024 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1025 		    EXT_DEF_PORTSPEED_1GBIT);
1026 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1027 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1028 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1029 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1030 	} else {
1031 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1032 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1033 	}
1034 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1035 	port_cnt = 0;
1036 	tgt_cnt = 0;
1037 
1038 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1039 		for (link = ha->dev[index].first; link != NULL;
1040 		    link = link->next) {
1041 			tq = link->base_address;
1042 
1043 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1044 				continue;
1045 			}
1046 
1047 			port_cnt++;
1048 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1049 				tgt_cnt++;
1050 			}
1051 		}
1052 	}
1053 
1054 	tmp_port.DiscPortCount = port_cnt;
1055 	tmp_port.DiscTargetCount = tgt_cnt;
1056 
1057 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1058 
1059 	rval = ddi_copyout((void *)&tmp_port,
1060 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1061 	    sizeof (EXT_HBA_PORT), mode);
1062 	if (rval != 0) {
1063 		cmd->Status = EXT_STATUS_COPY_ERR;
1064 		cmd->ResponseLen = 0;
1065 		EL(ha, "failed, ddi_copyout\n");
1066 	} else {
1067 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1068 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1069 		    ha->instance, port_cnt, tgt_cnt);
1070 	}
1071 }
1072 
1073 /*
1074  * ql_qry_disc_port
1075  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1076  *
1077  * Input:
1078  *	ha:	adapter state pointer.
1079  *	cmd:	EXT_IOCTL cmd struct pointer.
1080  *	mode:	flags.
1081  *
1082  *	cmd->Instance = Port instance in fcport chain.
1083  *
1084  * Returns:
1085  *	None, request status indicated in cmd->Status.
1086  *
1087  * Context:
1088  *	Kernel context.
1089  */
1090 static void
1091 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1092 {
1093 	EXT_DISC_PORT	tmp_port = {0};
1094 	ql_link_t	*link;
1095 	ql_tgt_t	*tq;
1096 	uint16_t	index;
1097 	uint16_t	inst = 0;
1098 
1099 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1100 
1101 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1102 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1103 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1104 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1105 		    cmd->ResponseLen);
1106 		cmd->ResponseLen = 0;
1107 		return;
1108 	}
1109 
1110 	for (link = NULL, index = 0;
1111 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1112 		for (link = ha->dev[index].first; link != NULL;
1113 		    link = link->next) {
1114 			tq = link->base_address;
1115 
1116 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1117 				continue;
1118 			}
1119 			if (inst != cmd->Instance) {
1120 				inst++;
1121 				continue;
1122 			}
1123 
1124 			/* fill in the values */
1125 			bcopy(tq->node_name, tmp_port.WWNN,
1126 			    EXT_DEF_WWN_NAME_SIZE);
1127 			bcopy(tq->port_name, tmp_port.WWPN,
1128 			    EXT_DEF_WWN_NAME_SIZE);
1129 
1130 			break;
1131 		}
1132 	}
1133 
1134 	if (link == NULL) {
1135 		/* no matching device */
1136 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1137 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1138 		cmd->ResponseLen = 0;
1139 		return;
1140 	}
1141 
1142 	tmp_port.Id[0] = 0;
1143 	tmp_port.Id[1] = tq->d_id.b.domain;
1144 	tmp_port.Id[2] = tq->d_id.b.area;
1145 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1146 
1147 	tmp_port.Type = 0;
1148 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1149 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1150 		    EXT_DEF_INITIATOR_DEV);
1151 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1152 		(void) ql_inq_scan(ha, tq, 1);
1153 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1154 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1155 	}
1156 
1157 	if (tq->flags & TQF_FABRIC_DEVICE) {
1158 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1159 	} else {
1160 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1161 	}
1162 
1163 	tmp_port.Status = 0;
1164 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1165 
1166 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1167 
1168 	if (ddi_copyout((void *)&tmp_port,
1169 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1170 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1171 		cmd->Status = EXT_STATUS_COPY_ERR;
1172 		cmd->ResponseLen = 0;
1173 		EL(ha, "failed, ddi_copyout\n");
1174 	} else {
1175 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1176 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1177 	}
1178 }
1179 
1180 /*
1181  * ql_qry_disc_tgt
1182  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1183  *
1184  * Input:
1185  *	ha:		adapter state pointer.
1186  *	cmd:		EXT_IOCTL cmd struct pointer.
1187  *	mode:		flags.
1188  *
1189  *	cmd->Instance = Port instance in fcport chain.
1190  *
1191  * Returns:
1192  *	None, request status indicated in cmd->Status.
1193  *
1194  * Context:
1195  *	Kernel context.
1196  */
1197 static void
1198 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1199 {
1200 	EXT_DISC_TARGET	tmp_tgt = {0};
1201 	ql_link_t	*link;
1202 	ql_tgt_t	*tq;
1203 	uint16_t	index;
1204 	uint16_t	inst = 0;
1205 
1206 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1207 	    cmd->Instance);
1208 
1209 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1210 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1211 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1212 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1213 		    cmd->ResponseLen);
1214 		cmd->ResponseLen = 0;
1215 		return;
1216 	}
1217 
1218 	/* Scan port list for requested target and fill in the values */
1219 	for (link = NULL, index = 0;
1220 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1221 		for (link = ha->dev[index].first; link != NULL;
1222 		    link = link->next) {
1223 			tq = link->base_address;
1224 
1225 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1226 			    tq->flags & TQF_INITIATOR_DEVICE) {
1227 				continue;
1228 			}
1229 			if (inst != cmd->Instance) {
1230 				inst++;
1231 				continue;
1232 			}
1233 
1234 			/* fill in the values */
1235 			bcopy(tq->node_name, tmp_tgt.WWNN,
1236 			    EXT_DEF_WWN_NAME_SIZE);
1237 			bcopy(tq->port_name, tmp_tgt.WWPN,
1238 			    EXT_DEF_WWN_NAME_SIZE);
1239 
1240 			break;
1241 		}
1242 	}
1243 
1244 	if (link == NULL) {
1245 		/* no matching device */
1246 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1247 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1248 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1249 		cmd->ResponseLen = 0;
1250 		return;
1251 	}
1252 	tmp_tgt.Id[0] = 0;
1253 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1254 	tmp_tgt.Id[2] = tq->d_id.b.area;
1255 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1256 
1257 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1258 
1259 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1260 		(void) ql_inq_scan(ha, tq, 1);
1261 	}
1262 
1263 	tmp_tgt.Type = 0;
1264 	if (tq->flags & TQF_TAPE_DEVICE) {
1265 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1266 	}
1267 
1268 	if (tq->flags & TQF_FABRIC_DEVICE) {
1269 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1270 	} else {
1271 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1272 	}
1273 
1274 	tmp_tgt.Status = 0;
1275 
1276 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1277 
1278 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1279 
1280 	if (ddi_copyout((void *)&tmp_tgt,
1281 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1282 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1283 		cmd->Status = EXT_STATUS_COPY_ERR;
1284 		cmd->ResponseLen = 0;
1285 		EL(ha, "failed, ddi_copyout\n");
1286 	} else {
1287 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1288 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1289 	}
1290 }
1291 
1292 /*
1293  * ql_qry_fw
1294  *	Performs EXT_SC_QUERY_FW subfunction.
1295  *
1296  * Input:
1297  *	ha:	adapter state pointer.
1298  *	cmd:	EXT_IOCTL cmd struct pointer.
1299  *	mode:	flags.
1300  *
1301  * Returns:
1302  *	None, request status indicated in cmd->Status.
1303  *
1304  * Context:
1305  *	Kernel context.
1306  */
1307 static void
1308 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1309 {
1310 	ql_mbx_data_t	mr;
1311 	EXT_FW		fw_info = {0};
1312 
1313 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1314 
1315 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1316 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1317 		cmd->DetailStatus = sizeof (EXT_FW);
1318 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1319 		    cmd->ResponseLen);
1320 		cmd->ResponseLen = 0;
1321 		return;
1322 	}
1323 
1324 	(void) ql_get_fw_version(ha, &mr);
1325 
	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
	    mr.mb[2], mr.mb[3]);
1328 
1329 	fw_info.Attrib = mr.mb[6];
1330 
1331 	if (ddi_copyout((void *)&fw_info,
1332 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1333 	    sizeof (EXT_FW), mode) != 0) {
1334 		cmd->Status = EXT_STATUS_COPY_ERR;
1335 		cmd->ResponseLen = 0;
1336 		EL(ha, "failed, ddi_copyout\n");
1337 		return;
1338 	} else {
1339 		cmd->ResponseLen = sizeof (EXT_FW);
1340 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1341 	}
1342 }
1343 
1344 /*
1345  * ql_qry_chip
1346  *	Performs EXT_SC_QUERY_CHIP subfunction.
1347  *
1348  * Input:
1349  *	ha:	adapter state pointer.
1350  *	cmd:	EXT_IOCTL cmd struct pointer.
1351  *	mode:	flags.
1352  *
1353  * Returns:
1354  *	None, request status indicated in cmd->Status.
1355  *
1356  * Context:
1357  *	Kernel context.
1358  */
1359 static void
1360 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1361 {
1362 	EXT_CHIP	chip = {0};
1363 
1364 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1365 
1366 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1367 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1368 		cmd->DetailStatus = sizeof (EXT_CHIP);
1369 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1370 		    cmd->ResponseLen);
1371 		cmd->ResponseLen = 0;
1372 		return;
1373 	}
1374 
1375 	chip.VendorId = ha->ven_id;
1376 	chip.DeviceId = ha->device_id;
1377 	chip.SubVendorId = ha->subven_id;
1378 	chip.SubSystemId = ha->subsys_id;
1379 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1380 	chip.IoAddrLen = 0x100;
1381 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1382 	chip.MemAddrLen = 0x100;
1383 	chip.ChipRevID = ha->rev_id;
1384 	if (ha->flags & FUNCTION_1) {
1385 		chip.FuncNo = 1;
1386 	}
1387 
1388 	if (ddi_copyout((void *)&chip,
1389 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1390 	    sizeof (EXT_CHIP), mode) != 0) {
1391 		cmd->Status = EXT_STATUS_COPY_ERR;
1392 		cmd->ResponseLen = 0;
1393 		EL(ha, "failed, ddi_copyout\n");
1394 	} else {
1395 		cmd->ResponseLen = sizeof (EXT_CHIP);
1396 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1397 	}
1398 }
1399 
1400 /*
1401  * ql_qry_driver
1402  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1403  *
1404  * Input:
1405  *	ha:	adapter state pointer.
1406  *	cmd:	EXT_IOCTL cmd struct pointer.
1407  *	mode:	flags.
1408  *
1409  * Returns:
1410  *	None, request status indicated in cmd->Status.
1411  *
1412  * Context:
1413  *	Kernel context.
1414  */
1415 static void
1416 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1417 {
1418 	EXT_DRIVER	qd = {0};
1419 
1420 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1421 
1422 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1423 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1424 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1425 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1426 		    cmd->ResponseLen);
1427 		cmd->ResponseLen = 0;
1428 		return;
1429 	}
1430 
1431 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1432 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1433 	qd.TargetsPerBus = (uint16_t)
1434 	    (CFG_IST(ha, (CFG_CTRL_242581 | CFG_EXT_FW_INTERFACE)) ?
1435 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1436 	qd.LunsPerTarget = 2030;
1437 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1438 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1439 
1440 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1441 	    sizeof (EXT_DRIVER), mode) != 0) {
1442 		cmd->Status = EXT_STATUS_COPY_ERR;
1443 		cmd->ResponseLen = 0;
1444 		EL(ha, "failed, ddi_copyout\n");
1445 	} else {
1446 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1447 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1448 	}
1449 }
1450 
1451 /*
1452  * ql_fcct
1453  *	IOCTL management server FC-CT passthrough.
1454  *
1455  * Input:
1456  *	ha:	adapter state pointer.
1457  *	cmd:	User space CT arguments pointer.
1458  *	mode:	flags.
1459  *
1460  * Returns:
1461  *	None, request status indicated in cmd->Status.
1462  *
1463  * Context:
1464  *	Kernel context.
1465  */
1466 static void
1467 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1468 {
1469 	ql_mbx_iocb_t		*pkt;
1470 	ql_mbx_data_t		mr;
1471 	dma_mem_t		*dma_mem;
1472 	caddr_t			pld;
1473 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1474 	int			rval;
1475 	ql_ct_iu_preamble_t	*ct;
1476 	ql_xioctl_t		*xp = ha->xioctl;
1477 	ql_tgt_t		tq;
1478 	uint16_t		comp_status, loop_id;
1479 
1480 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1481 
1482 	/* Get CT argument structure. */
1483 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1484 		EL(ha, "failed, No switch\n");
1485 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1486 		cmd->ResponseLen = 0;
1487 		return;
1488 	}
1489 
1490 	if (DRIVER_SUSPENDED(ha)) {
1491 		EL(ha, "failed, LOOP_NOT_READY\n");
1492 		cmd->Status = EXT_STATUS_BUSY;
1493 		cmd->ResponseLen = 0;
1494 		return;
1495 	}
1496 
1497 	/* Login management server device. */
1498 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
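		/* The management server is at well-known address FFFFFAh. */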
1499 		tq.d_id.b.al_pa = 0xfa;
1500 		tq.d_id.b.area = 0xff;
1501 		tq.d_id.b.domain = 0xff;
1502 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1503 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1504 		    MANAGEMENT_SERVER_LOOP_ID);
1505 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1506 		if (rval != QL_SUCCESS) {
1507 			EL(ha, "failed, server login\n");
1508 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1509 			cmd->ResponseLen = 0;
1510 			return;
1511 		} else {
1512 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1513 		}
1514 	}
1515 
1516 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1517 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1518 
1519 	/* Allocate a DMA Memory Descriptor */
1520 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1521 	if (dma_mem == NULL) {
1522 		EL(ha, "failed, kmem_zalloc\n");
1523 		cmd->Status = EXT_STATUS_NO_MEMORY;
1524 		cmd->ResponseLen = 0;
1525 		return;
1526 	}
1527 	/* Determine maximum buffer size. */
1528 	if (cmd->RequestLen < cmd->ResponseLen) {
1529 		pld_byte_cnt = cmd->ResponseLen;
1530 	} else {
1531 		pld_byte_cnt = cmd->RequestLen;
1532 	}
1533 
1534 	/* Allocate command block. */
1535 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1536 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
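	/* Payload staging area follows the IOCB in the same allocation. */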
1543 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1544 
1545 	/* Get command payload data. */
1546 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1547 	    cmd->RequestLen, mode) != cmd->RequestLen) {
		EL(ha, "failed, get_buffer_data\n");
		kmem_free(pkt, pkt_size);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
1553 	}
1554 
1555 	/* Get DMA memory for the IOCB */
1556 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1557 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1558 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1559 		    "alloc failed", QL_NAME, ha->instance);
1560 		kmem_free(pkt, pkt_size);
1561 		kmem_free(dma_mem, sizeof (dma_mem_t));
1562 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1563 		cmd->ResponseLen = 0;
1564 		return;
1565 	}
1566 
1567 	/* Copy out going payload data to IOCB DMA buffer. */
1568 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1569 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1570 
1571 	/* Sync IOCB DMA buffer. */
1572 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1573 	    DDI_DMA_SYNC_FORDEV);
1574 
1575 	/*
1576 	 * Setup IOCB
1577 	 */
1578 	ct = (ql_ct_iu_preamble_t *)pld;
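	/*
	 * The GS type in the CT preamble selects the destination:
	 * directory (name) server or management server.
	 */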
1579 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1580 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1581 		pkt->ms24.entry_count = 1;
1582 
1583 		/* Set loop ID */
1584 		pkt->ms24.n_port_hdl = (uint16_t)
1585 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1586 		    LE_16(SNS_24XX_HDL) :
1587 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1588 
1589 		/* Set ISP command timeout. */
1590 		pkt->ms24.timeout = LE_16(120);
1591 
1592 		/* Set cmd/response data segment counts. */
1593 		pkt->ms24.cmd_dseg_count = LE_16(1);
1594 		pkt->ms24.resp_dseg_count = LE_16(1);
1595 
1596 		/* Load ct cmd byte count. */
1597 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1598 
1599 		/* Load ct rsp byte count. */
1600 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1601 
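		/*
		 * The command and response data segments reference the same
		 * DMA buffer; the CT response overwrites the request payload.
		 */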
1602 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1603 
1604 		/* Load MS command entry data segments. */
1605 		*long_ptr++ = (uint32_t)
1606 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1607 		*long_ptr++ = (uint32_t)
1608 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1609 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1610 
1611 		/* Load MS response entry data segments. */
1612 		*long_ptr++ = (uint32_t)
1613 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1614 		*long_ptr++ = (uint32_t)
1615 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1616 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1617 
1618 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1619 		    sizeof (ql_mbx_iocb_t));
1620 
1621 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
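		/* A CT data underrun with zero residual is a success. */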
1622 		if (comp_status == CS_DATA_UNDERRUN) {
1623 			if ((BE_16(ct->max_residual_size)) == 0) {
1624 				comp_status = CS_COMPLETE;
1625 			}
1626 		}
1627 
1628 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1629 		    0) {
1630 			EL(ha, "failed, I/O timeout or "
1631 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1632 			    pkt->sts24.entry_status,
1633 			    pkt->sts24.scsi_status_l, rval);
1634 			kmem_free(pkt, pkt_size);
1635 			ql_free_dma_resource(ha, dma_mem);
1636 			kmem_free(dma_mem, sizeof (dma_mem_t));
1637 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1638 			cmd->ResponseLen = 0;
1639 			return;
1640 		}
1641 	} else {
1642 		pkt->ms.entry_type = MS_TYPE;
1643 		pkt->ms.entry_count = 1;
1644 
1645 		/* Set loop ID */
1646 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1647 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1648 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1649 			pkt->ms.loop_id_l = LSB(loop_id);
1650 			pkt->ms.loop_id_h = MSB(loop_id);
1651 		} else {
1652 			pkt->ms.loop_id_h = LSB(loop_id);
1653 		}
1654 
1655 		/* Set ISP command timeout. */
1656 		pkt->ms.timeout = LE_16(120);
1657 
1658 		/* Set data segment counts. */
1659 		pkt->ms.cmd_dseg_count_l = 1;
1660 		pkt->ms.total_dseg_count = LE_16(2);
1661 
1662 		/* Response total byte count. */
1663 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1664 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1665 
1666 		/* Command total byte count. */
1667 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1668 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1669 
1670 		/* Load command/response data segments. */
1671 		pkt->ms.dseg_0_address[0] = (uint32_t)
1672 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1673 		pkt->ms.dseg_0_address[1] = (uint32_t)
1674 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1675 		pkt->ms.dseg_1_address[0] = (uint32_t)
1676 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1677 		pkt->ms.dseg_1_address[1] = (uint32_t)
1678 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1679 
1680 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1681 		    sizeof (ql_mbx_iocb_t));
1682 
1683 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1684 		if (comp_status == CS_DATA_UNDERRUN) {
1685 			if ((BE_16(ct->max_residual_size)) == 0) {
1686 				comp_status = CS_COMPLETE;
1687 			}
1688 		}
1689 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1690 			EL(ha, "failed, I/O timeout or "
1691 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1692 			kmem_free(pkt, pkt_size);
1693 			ql_free_dma_resource(ha, dma_mem);
1694 			kmem_free(dma_mem, sizeof (dma_mem_t));
1695 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1696 			cmd->ResponseLen = 0;
1697 			return;
1698 		}
1699 	}
1700 
1701 	/* Sync in coming DMA buffer. */
1702 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1703 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1704 	/* Copy in coming DMA data. */
1705 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1706 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1707 	    DDI_DEV_AUTOINCR);
1708 
1709 	/* Copy response payload from DMA buffer to application. */
1710 	if (cmd->ResponseLen != 0) {
1711 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1712 		    cmd->ResponseLen);
1713 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1714 
1715 		/* Send response payload. */
1716 		if (ql_send_buffer_data(pld,
1717 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1718 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1719 			EL(ha, "failed, send_buffer_data\n");
1720 			cmd->Status = EXT_STATUS_COPY_ERR;
1721 			cmd->ResponseLen = 0;
1722 		}
1723 	}
1724 
1725 	kmem_free(pkt, pkt_size);
1726 	ql_free_dma_resource(ha, dma_mem);
1727 	kmem_free(dma_mem, sizeof (dma_mem_t));
1728 
1729 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1730 }
1731 
1732 /*
1733  * ql_aen_reg
1734  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1735  *
1736  * Input:
1737  *	ha:	adapter state pointer.
1738  *	cmd:	EXT_IOCTL cmd struct pointer.
1739  *	mode:	flags.
1740  *
1741  * Returns:
1742  *	None, request status indicated in cmd->Status.
1743  *
1744  * Context:
1745  *	Kernel context.
1746  */
1747 static void
1748 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1749 {
1750 	EXT_REG_AEN	reg_struct;
1751 	int		rval = 0;
1752 	ql_xioctl_t	*xp = ha->xioctl;
1753 
1754 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1755 
1756 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1757 	    cmd->RequestLen, mode);
1758 
1759 	if (rval == 0) {
1760 		if (reg_struct.Enable) {
1761 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1762 		} else {
1763 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1764 			/* Empty the queue. */
1765 			INTR_LOCK(ha);
1766 			xp->aen_q_head = 0;
1767 			xp->aen_q_tail = 0;
1768 			INTR_UNLOCK(ha);
1769 		}
1770 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1771 	} else {
1772 		cmd->Status = EXT_STATUS_COPY_ERR;
1773 		EL(ha, "failed, ddi_copyin\n");
1774 	}
1775 }
1776 
1777 /*
1778  * ql_aen_get
1779  *	IOCTL management server Asynchronous Event Record Transfer.
1780  *
1781  * Input:
1782  *	ha:	adapter state pointer.
1783  *	cmd:	EXT_IOCTL cmd struct pointer.
1784  *	mode:	flags.
1785  *
1786  * Returns:
1787  *	None, request status indicated in cmd->Status.
1788  *
1789  * Context:
1790  *	Kernel context.
1791  */
1792 static void
1793 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1794 {
1795 	uint32_t	out_size;
1796 	EXT_ASYNC_EVENT	*tmp_q;
1797 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1798 	uint8_t		i;
1799 	uint8_t		queue_cnt;
1800 	uint8_t		request_cnt;
1801 	ql_xioctl_t	*xp = ha->xioctl;
1802 
1803 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1804 
1805 	/* Compute the number of events that can be returned */
1806 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1807 
1808 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1809 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1810 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1811 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1812 		    "Len=%xh\n", request_cnt);
1813 		cmd->ResponseLen = 0;
1814 		return;
1815 	}
1816 
1817 	/* 1st: Make a local copy of the entire queue content. */
1818 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1819 	queue_cnt = 0;
1820 
1821 	INTR_LOCK(ha);
1822 	i = xp->aen_q_head;
1823 
1824 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1825 		if (tmp_q[i].AsyncEventCode != 0) {
1826 			bcopy(&tmp_q[i], &aen[queue_cnt],
1827 			    sizeof (EXT_ASYNC_EVENT));
1828 			queue_cnt++;
1829 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1830 		}
1831 		if (i == xp->aen_q_tail) {
1832 			/* done. */
1833 			break;
1834 		}
1835 		i++;
1836 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1837 			i = 0;
1838 		}
1839 	}
1840 
1841 	/* Empty the queue. */
1842 	xp->aen_q_head = 0;
1843 	xp->aen_q_tail = 0;
1844 
1845 	INTR_UNLOCK(ha);
1846 
	/* 2nd: Transfer the copied queue entries to the user's buffer. */
1849 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1850 	if (queue_cnt == 0) {
1851 		cmd->ResponseLen = 0;
1852 	} else if (ddi_copyout((void *)&aen[0],
1853 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1854 	    out_size, mode) != 0) {
1855 		cmd->Status = EXT_STATUS_COPY_ERR;
1856 		cmd->ResponseLen = 0;
1857 		EL(ha, "failed, ddi_copyout\n");
1858 	} else {
1859 		cmd->ResponseLen = out_size;
1860 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1861 	}
1862 }
1863 
1864 /*
1865  * ql_enqueue_aen
1866  *
1867  * Input:
1868  *	ha:		adapter state pointer.
1869  *	event_code:	async event code of the event to add to queue.
1870  *	payload:	event payload for the queue.
1871  *	INTR_LOCK must be already obtained.
1872  *
1873  * Context:
1874  *	Interrupt or Kernel context, no mailbox commands allowed.
1875  */
1876 void
1877 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1878 {
1879 	uint8_t			new_entry;	/* index to current entry */
1880 	uint16_t		*mbx;
1881 	EXT_ASYNC_EVENT		*aen_queue;
1882 	ql_xioctl_t		*xp = ha->xioctl;
1883 
1884 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1885 	    event_code);
1886 
1887 	if (xp == NULL) {
1888 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1889 		return;
1890 	}
1891 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1892 
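	/*
	 * The AEN queue is a fixed-size ring.  If the slot at the current
	 * tail is still occupied, the ring is full: the tail is advanced to
	 * claim the next slot and, if it catches up with the head, the head
	 * is advanced as well so the oldest event is overwritten.
	 */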
	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
1894 		/* Need to change queue pointers to make room. */
1895 
1896 		/* Increment tail for adding new entry. */
1897 		xp->aen_q_tail++;
1898 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1899 			xp->aen_q_tail = 0;
1900 		}
1901 		if (xp->aen_q_head == xp->aen_q_tail) {
1902 			/*
1903 			 * We're overwriting the oldest entry, so need to
1904 			 * update the head pointer.
1905 			 */
1906 			xp->aen_q_head++;
1907 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1908 				xp->aen_q_head = 0;
1909 			}
1910 		}
1911 	}
1912 
1913 	new_entry = xp->aen_q_tail;
1914 	aen_queue[new_entry].AsyncEventCode = event_code;
1915 
1916 	/* Update payload */
1917 	if (payload != NULL) {
1918 		switch (event_code) {
1919 		case MBA_LIP_OCCURRED:
1920 		case MBA_LOOP_UP:
1921 		case MBA_LOOP_DOWN:
1922 		case MBA_LIP_F8:
1923 		case MBA_LIP_RESET:
1924 		case MBA_PORT_UPDATE:
1925 			break;
1926 		case MBA_RSCN_UPDATE:
1927 			mbx = (uint16_t *)payload;
1928 			/* al_pa */
1929 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1930 			    LSB(mbx[2]);
1931 			/* area */
1932 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1933 			    MSB(mbx[2]);
1934 			/* domain */
1935 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1936 			    LSB(mbx[1]);
1937 			/* save in big endian */
1938 			BIG_ENDIAN_24(&aen_queue[new_entry].
1939 			    Payload.RSCN.RSCNInfo[0]);
1940 
1941 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1942 			    MSB(mbx[1]);
1943 
1944 			break;
1945 		default:
1946 			/* Not supported */
1947 			EL(ha, "failed, event code not supported=%xh\n",
1948 			    event_code);
1949 			aen_queue[new_entry].AsyncEventCode = 0;
1950 			break;
1951 		}
1952 	}
1953 
1954 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1955 }
1956 
1957 /*
1958  * ql_scsi_passthru
1959  *	IOCTL SCSI passthrough.
1960  *
1961  * Input:
1962  *	ha:	adapter state pointer.
1963  *	cmd:	User space SCSI command pointer.
1964  *	mode:	flags.
1965  *
1966  * Returns:
1967  *	None, request status indicated in cmd->Status.
1968  *
1969  * Context:
1970  *	Kernel context.
1971  */
1972 static void
1973 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1974 {
1975 	ql_mbx_iocb_t		*pkt;
1976 	ql_mbx_data_t		mr;
1977 	dma_mem_t		*dma_mem;
1978 	caddr_t			pld;
1979 	uint32_t		pkt_size, pld_size;
1980 	uint16_t		qlnt, retries, cnt, cnt2;
1981 	uint8_t			*name;
1982 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1983 	EXT_SCSI_PASSTHRU	*usp_req;
1984 	int			rval;
1985 	union _passthru {
1986 		EXT_SCSI_PASSTHRU	sp_cmd;
1987 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1988 	} pt_req;		/* Passthru request */
1989 	uint32_t		status, sense_sz = 0;
1990 	ql_tgt_t		*tq = NULL;
1991 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1992 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1993 
1994 	/* SCSI request struct for SCSI passthrough IOs. */
1995 	struct {
1996 		uint16_t	lun;
1997 		uint16_t	sense_length;	/* Sense buffer size */
1998 		size_t		resid;		/* Residual */
1999 		uint8_t		*cdbp;		/* Requestor's CDB */
2000 		uint8_t		*u_sense;	/* Requestor's sense buffer */
2001 		uint8_t		cdb_len;	/* Requestor's CDB length */
2002 		uint8_t		direction;
2003 	} scsi_req;
2004 
2005 	struct {
2006 		uint8_t		*rsp_info;
2007 		uint8_t		*req_sense_data;
2008 		uint32_t	residual_length;
2009 		uint32_t	rsp_info_length;
2010 		uint32_t	req_sense_length;
2011 		uint16_t	comp_status;
2012 		uint8_t		state_flags_l;
2013 		uint8_t		state_flags_h;
2014 		uint8_t		scsi_status_l;
2015 		uint8_t		scsi_status_h;
2016 	} sts;
2017 
2018 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2019 
	/* Verify Sub Code and set pld_size to the needed request size. */
2021 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2022 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2023 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2024 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2025 	} else {
2026 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2027 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2028 		cmd->ResponseLen = 0;
2029 		return;
2030 	}
2031 
2032 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2033 	if (dma_mem == NULL) {
2034 		EL(ha, "failed, kmem_zalloc\n");
2035 		cmd->Status = EXT_STATUS_NO_MEMORY;
2036 		cmd->ResponseLen = 0;
2037 		return;
2038 	}
2039 	/*  Verify the size of and copy in the passthru request structure. */
2040 	if (cmd->RequestLen != pld_size) {
2041 		/* Return error */
		EL(ha, "failed, RequestLen != pld_size, is=%xh, "
		    "expected=%xh\n", cmd->RequestLen, pld_size);
2044 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2045 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2046 		cmd->ResponseLen = 0;
2047 		return;
2048 	}
2049 
2050 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2051 	    pld_size, mode) != 0) {
2052 		EL(ha, "failed, ddi_copyin\n");
2053 		cmd->Status = EXT_STATUS_COPY_ERR;
2054 		cmd->ResponseLen = 0;
2055 		return;
2056 	}
2057 
	/*
	 * Find the fc_port from the SCSI PASSTHRU structure and fill in
	 * the scsi_req request data structure.
	 */
2062 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2063 		scsi_req.lun = sp_req->TargetAddr.Lun;
2064 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2065 		scsi_req.cdbp = &sp_req->Cdb[0];
2066 		scsi_req.cdb_len = sp_req->CdbLength;
2067 		scsi_req.direction = sp_req->Direction;
2068 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2069 		scsi_req.u_sense = &usp_req->SenseData[0];
2070 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2071 
2072 		qlnt = QLNT_PORT;
2073 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2074 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2075 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2076 		tq = ql_find_port(ha, name, qlnt);
2077 	} else {
2078 		/*
2079 		 * Must be FC PASSTHRU, verified above.
2080 		 */
2081 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2082 			qlnt = QLNT_PORT;
2083 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2084 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2085 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2086 			    ha->instance, cmd->SubCode, name[0], name[1],
2087 			    name[2], name[3], name[4], name[5], name[6],
2088 			    name[7]);
2089 			tq = ql_find_port(ha, name, qlnt);
2090 		} else if (fc_req->FCScsiAddr.DestType ==
2091 		    EXT_DEF_DESTTYPE_WWNN) {
2092 			qlnt = QLNT_NODE;
2093 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2094 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2095 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2096 			    ha->instance, cmd->SubCode, name[0], name[1],
2097 			    name[2], name[3], name[4], name[5], name[6],
2098 			    name[7]);
2099 			tq = ql_find_port(ha, name, qlnt);
2100 		} else if (fc_req->FCScsiAddr.DestType ==
2101 		    EXT_DEF_DESTTYPE_PORTID) {
2102 			qlnt = QLNT_PID;
2103 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2104 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2105 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2106 			    name[0], name[1], name[2]);
2107 			tq = ql_find_port(ha, name, qlnt);
2108 		} else {
2109 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2110 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2111 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2112 			cmd->ResponseLen = 0;
2113 			return;
2114 		}
2115 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2116 		scsi_req.sense_length = sizeof (fc_req->SenseData);
		scsi_req.cdbp = &fc_req->Cdb[0];
		scsi_req.cdb_len = fc_req->CdbLength;
2119 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2120 		scsi_req.u_sense = &ufc_req->SenseData[0];
2121 		scsi_req.direction = fc_req->Direction;
2122 	}
2123 
2124 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2125 		EL(ha, "failed, fc_port not found\n");
2126 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2127 		cmd->ResponseLen = 0;
2128 		return;
2129 	}
2130 
2131 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2132 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2133 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2134 		cmd->ResponseLen = 0;
2135 		return;
2136 	}
2137 
2138 	/* Allocate command block. */
2139 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2140 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2141 	    cmd->ResponseLen) {
2142 		pld_size = cmd->ResponseLen;
2143 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2144 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2145 		if (pkt == NULL) {
2146 			EL(ha, "failed, kmem_zalloc\n");
2147 			cmd->Status = EXT_STATUS_NO_MEMORY;
2148 			cmd->ResponseLen = 0;
2149 			return;
2150 		}
2151 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2152 
2153 		/* Get DMA memory for the IOCB */
2154 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2155 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2156 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2157 			    "alloc failed", QL_NAME, ha->instance);
2158 			kmem_free(pkt, pkt_size);
2159 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2160 			cmd->ResponseLen = 0;
2161 			return;
2162 		}
2163 
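		/*
		 * Translate the requested direction into IOCB control
		 * flags: CFG_CTRL_242581 adapters use CF_RD/CF_WR, while
		 * the legacy command IOCBs use CF_DATA_IN/CF_DATA_OUT plus
		 * simple tag queuing (CF_STAG).
		 */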
2164 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2165 			scsi_req.direction = (uint8_t)
2166 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2167 			    CF_RD : CF_DATA_IN | CF_STAG);
2168 		} else {
2169 			scsi_req.direction = (uint8_t)
2170 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2171 			    CF_WR : CF_DATA_OUT | CF_STAG);
2172 			cmd->ResponseLen = 0;
2173 
2174 			/* Get command payload. */
2175 			if (ql_get_buffer_data(
2176 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2177 			    pld, pld_size, mode) != pld_size) {
2178 				EL(ha, "failed, get_buffer_data\n");
2179 				cmd->Status = EXT_STATUS_COPY_ERR;
2180 
2181 				kmem_free(pkt, pkt_size);
2182 				ql_free_dma_resource(ha, dma_mem);
2183 				kmem_free(dma_mem, sizeof (dma_mem_t));
2184 				return;
2185 			}
2186 
			/* Copy outgoing data to DMA buffer. */
2188 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2189 			    (uint8_t *)dma_mem->bp, pld_size,
2190 			    DDI_DEV_AUTOINCR);
2191 
2192 			/* Sync DMA buffer. */
2193 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2194 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2195 		}
2196 	} else {
2197 		scsi_req.direction = (uint8_t)
2198 		    (CFG_IST(ha, CFG_CTRL_242581) ? 0 : CF_STAG);
2199 		cmd->ResponseLen = 0;
2200 
2201 		pkt_size = sizeof (ql_mbx_iocb_t);
2202 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2203 		if (pkt == NULL) {
2204 			EL(ha, "failed, kmem_zalloc-2\n");
2205 			cmd->Status = EXT_STATUS_NO_MEMORY;
2206 			return;
2207 		}
2208 		pld = NULL;
2209 		pld_size = 0;
2210 	}
2211 
2212 	/* retries = ha->port_down_retry_count; */
2213 	retries = 1;
2214 	cmd->Status = EXT_STATUS_OK;
2215 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2216 
2217 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2218 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2219 
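	/*
	 * Issue the command.  If the first attempt completes with a port
	 * logged-out or port unavailable status, a re-login is attempted
	 * and the command is retried once (retries is set to 1 above).
	 */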
2220 	do {
2221 		if (DRIVER_SUSPENDED(ha)) {
2222 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2223 			break;
2224 		}
2225 
2226 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2227 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2228 			pkt->cmd24.entry_count = 1;
2229 
2230 			/* Set LUN number */
2231 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2232 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2233 
2234 			/* Set N_port handle */
2235 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2236 
2237 			/* Set VP Index */
2238 			pkt->cmd24.vp_index = ha->vp_index;
2239 
2240 			/* Set target ID */
2241 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2242 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2243 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2244 
2245 			/* Set ISP command timeout. */
2246 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2247 
2248 			/* Load SCSI CDB */
2249 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2250 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2251 			    DDI_DEV_AUTOINCR);
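			/*
			 * Swap each 4-byte word of the CDB in place to get
			 * the byte order the firmware expects for the
			 * embedded CDB (the rest of the IOCB is built
			 * little-endian).
			 */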
2252 			for (cnt = 0; cnt < MAX_CMDSZ;
2253 			    cnt = (uint16_t)(cnt + 4)) {
2254 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2255 				    + cnt, 4);
2256 			}
2257 
2258 			/* Set tag queue control flags */
2259 			pkt->cmd24.task = TA_STAG;
2260 
2261 			if (pld_size) {
2262 				/* Set transfer direction. */
2263 				pkt->cmd24.control_flags = scsi_req.direction;
2264 
2265 				/* Set data segment count. */
2266 				pkt->cmd24.dseg_count = LE_16(1);
2267 
2268 				/* Load total byte count. */
2269 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2270 
2271 				/* Load data descriptor. */
2272 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2273 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2274 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2275 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2276 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2277 			}
2278 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2279 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2280 			pkt->cmd3.entry_count = 1;
2281 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2282 				pkt->cmd3.target_l = LSB(tq->loop_id);
2283 				pkt->cmd3.target_h = MSB(tq->loop_id);
2284 			} else {
2285 				pkt->cmd3.target_h = LSB(tq->loop_id);
2286 			}
2287 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2288 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2289 			pkt->cmd3.control_flags_l = scsi_req.direction;
2290 			pkt->cmd3.timeout = LE_16(15);
2291 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2292 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2293 			}
2294 			if (pld_size) {
2295 				pkt->cmd3.dseg_count = LE_16(1);
2296 				pkt->cmd3.byte_count = LE_32(pld_size);
2297 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2298 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2299 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2300 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2301 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2302 			}
2303 		} else {
2304 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2305 			pkt->cmd.entry_count = 1;
2306 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2307 				pkt->cmd.target_l = LSB(tq->loop_id);
2308 				pkt->cmd.target_h = MSB(tq->loop_id);
2309 			} else {
2310 				pkt->cmd.target_h = LSB(tq->loop_id);
2311 			}
2312 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2313 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2314 			pkt->cmd.control_flags_l = scsi_req.direction;
2315 			pkt->cmd.timeout = LE_16(15);
2316 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2317 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2318 			}
2319 			if (pld_size) {
2320 				pkt->cmd.dseg_count = LE_16(1);
2321 				pkt->cmd.byte_count = LE_32(pld_size);
2322 				pkt->cmd.dseg_0_address = (uint32_t)
2323 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2324 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2325 			}
2326 		}
2327 		/* Go issue command and wait for completion. */
2328 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2329 		QL_DUMP_9(pkt, 8, pkt_size);
2330 
2331 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2332 
2333 		if (pld_size) {
			/* Sync incoming DMA buffer. */
2335 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2336 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
			/* Copy incoming DMA data. */
2338 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2339 			    (uint8_t *)dma_mem->bp, pld_size,
2340 			    DDI_DEV_AUTOINCR);
2341 		}
2342 
2343 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2344 			pkt->sts24.entry_status = (uint8_t)
2345 			    (pkt->sts24.entry_status & 0x3c);
2346 		} else {
2347 			pkt->sts.entry_status = (uint8_t)
2348 			    (pkt->sts.entry_status & 0x7e);
2349 		}
2350 
2351 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2352 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2353 			    pkt->sts.entry_status, tq->d_id.b24);
2354 			status = QL_FUNCTION_PARAMETER_ERROR;
2355 		}
2356 
2357 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2358 		    LE_16(pkt->sts24.comp_status) :
2359 		    LE_16(pkt->sts.comp_status));
2360 
		/*
		 * We have now verified as much of the request as we can up
		 * front.  Next, verify our ability to actually issue the
		 * CDB.
		 */
2366 		if (DRIVER_SUSPENDED(ha)) {
2367 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2368 			break;
2369 		} else if (status == QL_SUCCESS &&
2370 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2371 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2372 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2373 			if (tq->flags & TQF_FABRIC_DEVICE) {
2374 				rval = ql_login_fport(ha, tq, tq->loop_id,
2375 				    LFF_NO_PLOGI, &mr);
2376 				if (rval != QL_SUCCESS) {
2377 					EL(ha, "failed, login_fport=%xh, "
2378 					    "d_id=%xh\n", rval, tq->d_id.b24);
2379 				}
2380 			} else {
2381 				rval = ql_login_lport(ha, tq, tq->loop_id,
2382 				    LLF_NONE);
2383 				if (rval != QL_SUCCESS) {
2384 					EL(ha, "failed, login_lport=%xh, "
2385 					    "d_id=%xh\n", rval, tq->d_id.b24);
2386 				}
2387 			}
2388 		} else {
2389 			break;
2390 		}
2391 
2392 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2393 
2394 	} while (retries--);
2395 
2396 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2397 		/* Cannot issue command now, maybe later */
2398 		EL(ha, "failed, suspended\n");
2399 		kmem_free(pkt, pkt_size);
2400 		ql_free_dma_resource(ha, dma_mem);
2401 		kmem_free(dma_mem, sizeof (dma_mem_t));
2402 		cmd->Status = EXT_STATUS_SUSPENDED;
2403 		cmd->ResponseLen = 0;
2404 		return;
2405 	}
2406 
2407 	if (status != QL_SUCCESS) {
2408 		/* Command error */
2409 		EL(ha, "failed, I/O\n");
2410 		kmem_free(pkt, pkt_size);
2411 		ql_free_dma_resource(ha, dma_mem);
2412 		kmem_free(dma_mem, sizeof (dma_mem_t));
2413 		cmd->Status = EXT_STATUS_ERR;
2414 		cmd->DetailStatus = status;
2415 		cmd->ResponseLen = 0;
2416 		return;
2417 	}
2418 
2419 	/* Setup status. */
2420 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2421 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2422 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2423 
2424 		/* Setup residuals. */
2425 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2426 
2427 		/* Setup state flags. */
2428 		sts.state_flags_l = pkt->sts24.state_flags_l;
2429 		sts.state_flags_h = pkt->sts24.state_flags_h;
2430 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2431 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2432 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2433 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2434 		} else {
2435 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2436 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2437 			    SF_GOT_STATUS);
2438 		}
2439 		if (scsi_req.direction & CF_WR) {
2440 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2441 			    SF_DATA_OUT);
2442 		} else if (scsi_req.direction & CF_RD) {
2443 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2444 			    SF_DATA_IN);
2445 		}
2446 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2447 
2448 		/* Setup FCP response info. */
2449 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2450 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2451 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2452 		for (cnt = 0; cnt < sts.rsp_info_length;
2453 		    cnt = (uint16_t)(cnt + 4)) {
2454 			ql_chg_endian(sts.rsp_info + cnt, 4);
2455 		}
2456 
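		/*
		 * In the 24xx status IOCB the sense bytes follow the FCP
		 * response info within rsp_sense_data, so the sense pointer
		 * below is offset by rsp_info_length and gets the same
		 * 4-byte swap.
		 */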
2457 		/* Setup sense data. */
2458 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2459 			sts.req_sense_length =
2460 			    LE_32(pkt->sts24.fcp_sense_length);
2461 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2462 			    SF_ARQ_DONE);
2463 		} else {
2464 			sts.req_sense_length = 0;
2465 		}
2466 		sts.req_sense_data =
2467 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2468 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2469 		    (uintptr_t)sts.req_sense_data);
2470 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2471 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2472 		}
2473 	} else {
2474 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2475 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2476 
2477 		/* Setup residuals. */
2478 		sts.residual_length = LE_32(pkt->sts.residual_length);
2479 
2480 		/* Setup state flags. */
2481 		sts.state_flags_l = pkt->sts.state_flags_l;
2482 		sts.state_flags_h = pkt->sts.state_flags_h;
2483 
2484 		/* Setup FCP response info. */
2485 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2486 		    LE_16(pkt->sts.rsp_info_length) : 0;
2487 		sts.rsp_info = &pkt->sts.rsp_info[0];
2488 
2489 		/* Setup sense data. */
2490 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2491 		    LE_16(pkt->sts.req_sense_length) : 0;
2492 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2493 	}
2494 
2495 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2496 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2497 
2498 	switch (sts.comp_status) {
2499 	case CS_INCOMPLETE:
2500 	case CS_ABORTED:
2501 	case CS_DEVICE_UNAVAILABLE:
2502 	case CS_PORT_UNAVAILABLE:
2503 	case CS_PORT_LOGGED_OUT:
2504 	case CS_PORT_CONFIG_CHG:
2505 	case CS_PORT_BUSY:
2506 	case CS_LOOP_DOWN_ABORT:
2507 		cmd->Status = EXT_STATUS_BUSY;
2508 		break;
2509 	case CS_RESET:
2510 	case CS_QUEUE_FULL:
2511 		cmd->Status = EXT_STATUS_ERR;
2512 		break;
2513 	case CS_TIMEOUT:
2514 		cmd->Status = EXT_STATUS_ERR;
2515 		break;
2516 	case CS_DATA_OVERRUN:
2517 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2518 		break;
2519 	case CS_DATA_UNDERRUN:
2520 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2521 		break;
2522 	}
2523 
	/*
	 * For non-data-transfer commands, fix the transfer counts.
	 */
2527 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2528 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2529 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2530 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2531 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2532 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2533 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2534 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2535 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2536 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2537 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2538 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2539 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2540 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2541 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2542 		/*
2543 		 * Non data transfer command, clear sts_entry residual
2544 		 * length.
2545 		 */
2546 		sts.residual_length = 0;
2547 		cmd->ResponseLen = 0;
2548 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2549 			sts.comp_status = CS_COMPLETE;
2550 			cmd->Status = EXT_STATUS_OK;
2551 		}
2552 	} else {
2553 		cmd->ResponseLen = pld_size;
2554 	}
2555 
2556 	/* Correct ISP completion status */
2557 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2558 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2559 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2560 		    ha->instance);
2561 		scsi_req.resid = 0;
2562 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2563 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2564 		    ha->instance);
2565 		scsi_req.resid = sts.residual_length;
2566 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2567 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2568 
2569 			cmd->ResponseLen = (uint32_t)
2570 			    (pld_size - scsi_req.resid);
2571 		} else {
2572 			EL(ha, "failed, Transfer ERROR\n");
2573 			cmd->Status = EXT_STATUS_ERR;
2574 			cmd->ResponseLen = 0;
2575 		}
2576 	} else {
2577 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2578 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2579 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2580 		    sts.scsi_status_l);
2581 
2582 		scsi_req.resid = pld_size;
2583 		/*
2584 		 * Handle residual count on SCSI check
2585 		 * condition.
2586 		 *
2587 		 * - If Residual Under / Over is set, use the
2588 		 *   Residual Transfer Length field in IOCB.
2589 		 * - If Residual Under / Over is not set, and
2590 		 *   Transferred Data bit is set in State Flags
2591 		 *   field of IOCB, report residual value of 0
2592 		 *   (you may want to do this for tape
2593 		 *   Write-type commands only). This takes care
2594 		 *   of logical end of tape problem and does
2595 		 *   not break Unit Attention.
2596 		 * - If Residual Under / Over is not set, and
2597 		 *   Transferred Data bit is not set in State
2598 		 *   Flags, report residual value equal to
2599 		 *   original data transfer length.
2600 		 */
2601 		if (sts.scsi_status_l & STATUS_CHECK) {
2602 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2603 			cmd->DetailStatus = sts.scsi_status_l;
2604 			if (sts.scsi_status_h &
2605 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2606 				scsi_req.resid = sts.residual_length;
2607 			} else if (sts.state_flags_h &
2608 			    STATE_XFERRED_DATA) {
2609 				scsi_req.resid = 0;
2610 			}
2611 		}
2612 	}
2613 
2614 	if (sts.scsi_status_l & STATUS_CHECK &&
2615 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2616 	    sts.req_sense_length) {
		/*
		 * Check condition with the valid sense data flag set and
		 * sense length != 0.
		 */
2621 		if (sts.req_sense_length > scsi_req.sense_length) {
2622 			sense_sz = scsi_req.sense_length;
2623 		} else {
2624 			sense_sz = sts.req_sense_length;
2625 		}
2626 
2627 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2628 		    tq->d_id.b24);
2629 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2630 
2631 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2632 		    (size_t)sense_sz, mode) != 0) {
2633 			EL(ha, "failed, request sense ddi_copyout\n");
2634 		}
2635 
2636 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2637 		cmd->DetailStatus = sts.scsi_status_l;
2638 	}
2639 
2640 	/* Copy response payload from DMA buffer to application. */
2641 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2642 	    cmd->ResponseLen != 0) {
2643 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2644 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2645 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2646 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2647 
2648 		/* Send response payload. */
2649 		if (ql_send_buffer_data(pld,
2650 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2651 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2652 			EL(ha, "failed, send_buffer_data\n");
2653 			cmd->Status = EXT_STATUS_COPY_ERR;
2654 			cmd->ResponseLen = 0;
2655 		}
2656 	}
2657 
2658 	if (cmd->Status != EXT_STATUS_OK) {
2659 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2660 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2661 	} else {
2662 		/*EMPTY*/
2663 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2664 		    ha->instance, cmd->ResponseLen);
2665 	}
2666 
2667 	kmem_free(pkt, pkt_size);
2668 	ql_free_dma_resource(ha, dma_mem);
2669 	kmem_free(dma_mem, sizeof (dma_mem_t));
2670 }
2671 
2672 /*
2673  * ql_wwpn_to_scsiaddr
2674  *
2675  * Input:
2676  *	ha:	adapter state pointer.
2677  *	cmd:	EXT_IOCTL cmd struct pointer.
2678  *	mode:	flags.
2679  *
2680  * Context:
2681  *	Kernel context.
2682  */
2683 static void
2684 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2685 {
2686 	int		status;
2687 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2688 	EXT_SCSI_ADDR	*tmp_addr;
2689 	ql_tgt_t	*tq;
2690 
2691 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2692 
2693 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2694 		/* Return error */
2695 		EL(ha, "incorrect RequestLen\n");
2696 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2697 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2698 		return;
2699 	}
2700 
2701 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2702 	    cmd->RequestLen, mode);
2703 
2704 	if (status != 0) {
2705 		cmd->Status = EXT_STATUS_COPY_ERR;
2706 		EL(ha, "failed, ddi_copyin\n");
2707 		return;
2708 	}
2709 
2710 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2711 
2712 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2713 		/* no matching device */
2714 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2715 		EL(ha, "failed, device not found\n");
2716 		return;
2717 	}
2718 
2719 	/* Copy out the IDs found.  For now we can only return target ID. */
2720 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2721 
2722 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2723 
2724 	if (status != 0) {
2725 		cmd->Status = EXT_STATUS_COPY_ERR;
2726 		EL(ha, "failed, ddi_copyout\n");
2727 	} else {
2728 		cmd->Status = EXT_STATUS_OK;
2729 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2730 	}
2731 }
2732 
2733 /*
2734  * ql_host_idx
2735  *	Gets host order index.
2736  *
2737  * Input:
2738  *	ha:	adapter state pointer.
2739  *	cmd:	EXT_IOCTL cmd struct pointer.
2740  *	mode:	flags.
2741  *
2742  * Returns:
2743  *	None, request status indicated in cmd->Status.
2744  *
2745  * Context:
2746  *	Kernel context.
2747  */
2748 static void
2749 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2750 {
2751 	uint16_t	idx;
2752 
2753 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2754 
2755 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2756 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2757 		cmd->DetailStatus = sizeof (uint16_t);
2758 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2759 		cmd->ResponseLen = 0;
2760 		return;
2761 	}
2762 
2763 	idx = (uint16_t)ha->instance;
2764 
2765 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2766 	    sizeof (uint16_t), mode) != 0) {
2767 		cmd->Status = EXT_STATUS_COPY_ERR;
2768 		cmd->ResponseLen = 0;
2769 		EL(ha, "failed, ddi_copyout\n");
2770 	} else {
2771 		cmd->ResponseLen = sizeof (uint16_t);
2772 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2773 	}
2774 }
2775 
2776 /*
2777  * ql_host_drvname
2778  *	Gets host driver name
2779  *
2780  * Input:
2781  *	ha:	adapter state pointer.
2782  *	cmd:	EXT_IOCTL cmd struct pointer.
2783  *	mode:	flags.
2784  *
2785  * Returns:
2786  *	None, request status indicated in cmd->Status.
2787  *
2788  * Context:
2789  *	Kernel context.
2790  */
2791 static void
2792 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2793 {
2794 
2795 	char		drvname[] = QL_NAME;
2796 	uint32_t	qlnamelen;
2797 
2798 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2799 
2800 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2801 
2802 	if (cmd->ResponseLen < qlnamelen) {
2803 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2804 		cmd->DetailStatus = qlnamelen;
2805 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2806 		    cmd->ResponseLen, qlnamelen);
2807 		cmd->ResponseLen = 0;
2808 		return;
2809 	}
2810 
2811 	if (ddi_copyout((void *)&drvname,
2812 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2813 	    qlnamelen, mode) != 0) {
2814 		cmd->Status = EXT_STATUS_COPY_ERR;
2815 		cmd->ResponseLen = 0;
2816 		EL(ha, "failed, ddi_copyout\n");
2817 	} else {
2818 		cmd->ResponseLen = qlnamelen-1;
2819 	}
2820 
2821 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2822 }
2823 
2824 /*
2825  * ql_read_nvram
2826  *	Get NVRAM contents.
2827  *
2828  * Input:
2829  *	ha:	adapter state pointer.
2830  *	cmd:	EXT_IOCTL cmd struct pointer.
2831  *	mode:	flags.
2832  *
2833  * Returns:
2834  *	None, request status indicated in cmd->Status.
2835  *
2836  * Context:
2837  *	Kernel context.
2838  */
2839 static void
2840 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2841 {
2842 
2843 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2844 
2845 	if (cmd->ResponseLen < ha->nvram_cache->size) {
2846 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2847 		cmd->DetailStatus = ha->nvram_cache->size;
2848 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2849 		    cmd->ResponseLen);
2850 		cmd->ResponseLen = 0;
2851 		return;
2852 	}
2853 
2854 	/* Get NVRAM data. */
2855 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2856 	    mode) != 0) {
2857 		cmd->Status = EXT_STATUS_COPY_ERR;
2858 		cmd->ResponseLen = 0;
2859 		EL(ha, "failed, copy error\n");
2860 	} else {
2861 		cmd->ResponseLen = ha->nvram_cache->size;
2862 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2863 	}
2864 }
2865 
2866 /*
2867  * ql_write_nvram
2868  *	Loads NVRAM contents.
2869  *
2870  * Input:
2871  *	ha:	adapter state pointer.
2872  *	cmd:	EXT_IOCTL cmd struct pointer.
2873  *	mode:	flags.
2874  *
2875  * Returns:
2876  *	None, request status indicated in cmd->Status.
2877  *
2878  * Context:
2879  *	Kernel context.
2880  */
2881 static void
2882 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2883 {
2884 
2885 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2886 
2887 	if (cmd->RequestLen < ha->nvram_cache->size) {
2888 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2889 		cmd->DetailStatus = ha->nvram_cache->size;
2890 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2891 		    cmd->RequestLen);
2892 		return;
2893 	}
2894 
2895 	/* Load NVRAM data. */
2896 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2897 	    mode) != 0) {
2898 		cmd->Status = EXT_STATUS_COPY_ERR;
2899 		EL(ha, "failed, copy error\n");
2900 	} else {
2901 		/*EMPTY*/
2902 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2903 	}
2904 }
2905 
2906 /*
2907  * ql_write_vpd
2908  *	Loads VPD contents.
2909  *
2910  * Input:
2911  *	ha:	adapter state pointer.
2912  *	cmd:	EXT_IOCTL cmd struct pointer.
2913  *	mode:	flags.
2914  *
2915  * Returns:
2916  *	None, request status indicated in cmd->Status.
2917  *
2918  * Context:
2919  *	Kernel context.
2920  */
2921 static void
2922 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2923 {
	int32_t		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2927 
2928 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2929 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2930 		EL(ha, "failed, invalid request for HBA\n");
2931 		return;
2932 	}
2933 
2934 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2935 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2936 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2937 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2938 		    cmd->RequestLen);
2939 		return;
2940 	}
2941 
2942 	/* Load VPD data. */
2943 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2944 	    mode)) != 0) {
2945 		cmd->Status = EXT_STATUS_COPY_ERR;
2946 		cmd->DetailStatus = rval;
2947 		EL(ha, "failed, errno=%x\n", rval);
2948 	} else {
2949 		/*EMPTY*/
2950 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2951 	}
2952 }
2953 
2954 /*
2955  * ql_read_vpd
2956  *	Dumps VPD contents.
2957  *
2958  * Input:
2959  *	ha:	adapter state pointer.
2960  *	cmd:	EXT_IOCTL cmd struct pointer.
2961  *	mode:	flags.
2962  *
2963  * Returns:
2964  *	None, request status indicated in cmd->Status.
2965  *
2966  * Context:
2967  *	Kernel context.
2968  */
2969 static void
2970 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2971 {
2972 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2973 
2974 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2975 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2976 		EL(ha, "failed, invalid request for HBA\n");
2977 		return;
2978 	}
2979 
2980 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2981 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2982 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2983 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2984 		    cmd->ResponseLen);
2985 		return;
2986 	}
2987 
2988 	/* Dump VPD data. */
2989 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2990 	    mode)) != 0) {
2991 		cmd->Status = EXT_STATUS_COPY_ERR;
2992 		EL(ha, "failed,\n");
2993 	} else {
2994 		/*EMPTY*/
2995 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2996 	}
2997 }
2998 
2999 /*
3000  * ql_get_fcache
3001  *	Dumps flash cache contents.
3002  *
3003  * Input:
3004  *	ha:	adapter state pointer.
3005  *	cmd:	EXT_IOCTL cmd struct pointer.
3006  *	mode:	flags.
3007  *
3008  * Returns:
3009  *	None, request status indicated in cmd->Status.
3010  *
3011  * Context:
3012  *	Kernel context.
3013  */
3014 static void
3015 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3016 {
3017 	uint32_t	bsize, boff, types, cpsize, hsize;
3018 	ql_fcache_t	*fptr;
3019 
3020 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3021 
3022 	CACHE_LOCK(ha);
3023 
3024 	if (ha->fcache == NULL) {
3025 		CACHE_UNLOCK(ha);
3026 		cmd->Status = EXT_STATUS_ERR;
3027 		EL(ha, "failed, adapter fcache not setup\n");
3028 		return;
3029 	}
3030 
3031 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
3032 		bsize = 100;
3033 	} else {
3034 		bsize = 400;
3035 	}
3036 
3037 	if (cmd->ResponseLen < bsize) {
3038 		CACHE_UNLOCK(ha);
3039 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3040 		cmd->DetailStatus = bsize;
3041 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3042 		    bsize, cmd->ResponseLen);
3043 		return;
3044 	}
3045 
3046 	boff = 0;
3047 	bsize = 0;
3048 	fptr = ha->fcache;
3049 
3050 	/*
3051 	 * For backwards compatibility, get one of each image type
3052 	 */
3053 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
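	/*
	 * Layout expected by older callers: each image occupies a fixed
	 * 100-byte slot (boff advances by 100 regardless of how much was
	 * actually copied), and the firmware image goes at offset 300
	 * below.
	 */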
3054 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3055 		/* Get the next image */
3056 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3057 
3058 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3059 
3060 			if (ddi_copyout(fptr->buf,
3061 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3062 			    cpsize, mode) != 0) {
3063 				CACHE_UNLOCK(ha);
3064 				EL(ha, "ddicopy failed, done\n");
3065 				cmd->Status = EXT_STATUS_COPY_ERR;
3066 				cmd->DetailStatus = 0;
3067 				return;
3068 			}
3069 			boff += 100;
3070 			bsize += cpsize;
3071 			types &= ~(fptr->type);
3072 		}
3073 	}
3074 
3075 	/*
3076 	 * Get the firmware image -- it needs to be last in the
3077 	 * buffer at offset 300 for backwards compatibility. Also for
3078 	 * backwards compatibility, the pci header is stripped off.
3079 	 */
3080 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3081 
3082 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3083 		if (hsize > fptr->buflen) {
3084 			CACHE_UNLOCK(ha);
3085 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3086 			    hsize, fptr->buflen);
3087 			cmd->Status = EXT_STATUS_COPY_ERR;
3088 			cmd->DetailStatus = 0;
3089 			return;
3090 		}
3091 
3092 		cpsize = ((fptr->buflen - hsize) < 100 ?
3093 		    fptr->buflen - hsize : 100);
3094 
3095 		if (ddi_copyout(fptr->buf+hsize,
3096 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3097 		    cpsize, mode) != 0) {
3098 			CACHE_UNLOCK(ha);
3099 			EL(ha, "fw ddicopy failed, done\n");
3100 			cmd->Status = EXT_STATUS_COPY_ERR;
3101 			cmd->DetailStatus = 0;
3102 			return;
3103 		}
3104 		bsize += 100;
3105 	}
3106 
3107 	CACHE_UNLOCK(ha);
3108 	cmd->Status = EXT_STATUS_OK;
3109 	cmd->DetailStatus = bsize;
3110 
3111 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3112 }
3113 
3114 /*
3115  * ql_get_fcache_ex
3116  *	Dumps flash cache contents.
3117  *
3118  * Input:
3119  *	ha:	adapter state pointer.
3120  *	cmd:	EXT_IOCTL cmd struct pointer.
3121  *	mode:	flags.
3122  *
3123  * Returns:
3124  *	None, request status indicated in cmd->Status.
3125  *
3126  * Context:
3127  *	Kernel context.
3128  */
3129 static void
3130 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3131 {
3132 	uint32_t	bsize = 0;
3133 	uint32_t	boff = 0;
3134 	ql_fcache_t	*fptr;
3135 
3136 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3137 
3138 	CACHE_LOCK(ha);
3139 	if (ha->fcache == NULL) {
3140 		CACHE_UNLOCK(ha);
3141 		cmd->Status = EXT_STATUS_ERR;
3142 		EL(ha, "failed, adapter fcache not setup\n");
3143 		return;
3144 	}
3145 
3146 	/* Make sure user passed enough buffer space */
3147 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3148 		bsize += FBUFSIZE;
3149 	}
3150 
3151 	if (cmd->ResponseLen < bsize) {
3152 		CACHE_UNLOCK(ha);
3153 		if (cmd->ResponseLen != 0) {
3154 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3155 			    bsize, cmd->ResponseLen);
3156 		}
3157 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3158 		cmd->DetailStatus = bsize;
3159 		return;
3160 	}
3161 
3162 	boff = 0;
3163 	fptr = ha->fcache;
3164 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3165 		/* Get the next image */
3166 		if (ddi_copyout(fptr->buf,
3167 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3168 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3169 		    mode) != 0) {
3170 			CACHE_UNLOCK(ha);
3171 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3172 			cmd->Status = EXT_STATUS_COPY_ERR;
3173 			cmd->DetailStatus = 0;
3174 			return;
3175 		}
3176 		boff += FBUFSIZE;
3177 		fptr = fptr->next;
3178 	}
3179 
3180 	CACHE_UNLOCK(ha);
3181 	cmd->Status = EXT_STATUS_OK;
3182 	cmd->DetailStatus = bsize;
3183 
3184 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3185 }
3186 
3187 /*
3188  * ql_read_flash
3189  *	Get flash contents.
3190  *
3191  * Input:
3192  *	ha:	adapter state pointer.
3193  *	cmd:	EXT_IOCTL cmd struct pointer.
3194  *	mode:	flags.
3195  *
3196  * Returns:
3197  *	None, request status indicated in cmd->Status.
3198  *
3199  * Context:
3200  *	Kernel context.
3201  */
3202 static void
3203 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3204 {
3205 	ql_xioctl_t	*xp = ha->xioctl;
3206 
3207 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3208 
3209 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3210 		EL(ha, "ql_stall_driver failed\n");
3211 		cmd->Status = EXT_STATUS_BUSY;
3212 		cmd->DetailStatus = xp->fdesc.flash_size;
3213 		cmd->ResponseLen = 0;
3214 		return;
3215 	}
3216 
3217 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3218 		cmd->Status = EXT_STATUS_ERR;
3219 		cmd->DetailStatus = xp->fdesc.flash_size;
3220 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3221 		    cmd->ResponseLen, xp->fdesc.flash_size);
3222 		cmd->ResponseLen = 0;
3223 	} else {
3224 		/* adjust read size to flash size */
3225 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3226 			EL(ha, "adjusting req=%xh, max=%xh\n",
3227 			    cmd->ResponseLen, xp->fdesc.flash_size);
3228 			cmd->ResponseLen = xp->fdesc.flash_size;
3229 		}
3230 
3231 		/* Get flash data. */
3232 		if (ql_flash_fcode_dump(ha,
3233 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3234 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3235 			cmd->Status = EXT_STATUS_COPY_ERR;
3236 			cmd->ResponseLen = 0;
3237 			EL(ha, "failed,\n");
3238 		}
3239 	}
3240 
3241 	/* Resume I/O */
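	/*
	 * ql_restart_driver() is sufficient for CFG_CTRL_242581 adapters;
	 * older adapters are instead restarted by scheduling an ISP abort
	 * through the task daemon.
	 */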
3242 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3243 		ql_restart_driver(ha);
3244 	} else {
3245 		EL(ha, "isp_abort_needed for restart\n");
3246 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3247 		    DRIVER_STALL);
3248 	}
3249 
3250 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3251 }
3252 
3253 /*
3254  * ql_write_flash
3255  *	Loads flash contents.
3256  *
3257  * Input:
3258  *	ha:	adapter state pointer.
3259  *	cmd:	EXT_IOCTL cmd struct pointer.
3260  *	mode:	flags.
3261  *
3262  * Returns:
3263  *	None, request status indicated in cmd->Status.
3264  *
3265  * Context:
3266  *	Kernel context.
3267  */
3268 static void
3269 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3270 {
3271 	ql_xioctl_t	*xp = ha->xioctl;
3272 
3273 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3274 
3275 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3276 		EL(ha, "ql_stall_driver failed\n");
3277 		cmd->Status = EXT_STATUS_BUSY;
3278 		cmd->DetailStatus = xp->fdesc.flash_size;
3279 		cmd->ResponseLen = 0;
3280 		return;
3281 	}
3282 
3283 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3284 		cmd->Status = EXT_STATUS_ERR;
3285 		cmd->DetailStatus = xp->fdesc.flash_size;
3286 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3287 		    cmd->RequestLen, xp->fdesc.flash_size);
3288 		cmd->ResponseLen = 0;
3289 	} else {
3290 		/* Load flash data. */
3291 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3292 			cmd->Status = EXT_STATUS_ERR;
3293 			cmd->DetailStatus =  xp->fdesc.flash_size;
3294 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3295 			    cmd->RequestLen, xp->fdesc.flash_size);
3296 		} else if (ql_flash_fcode_load(ha,
3297 		    (void *)(uintptr_t)(cmd->RequestAdr),
3298 		    (size_t)(cmd->RequestLen), mode) != 0) {
3299 			cmd->Status = EXT_STATUS_COPY_ERR;
3300 			EL(ha, "failed,\n");
3301 		}
3302 	}
3303 
3304 	/* Resume I/O */
3305 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3306 		ql_restart_driver(ha);
3307 	} else {
3308 		EL(ha, "isp_abort_needed for restart\n");
3309 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3310 		    DRIVER_STALL);
3311 	}
3312 
3313 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3314 }
3315 
3316 /*
3317  * ql_diagnostic_loopback
3318  *	Performs EXT_CC_LOOPBACK Command
3319  *
3320  * Input:
3321  *	ha:	adapter state pointer.
3322  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3323  *	mode:	flags.
3324  *
3325  * Returns:
3326  *	None, request status indicated in cmd->Status.
3327  *
3328  * Context:
3329  *	Kernel context.
3330  */
3331 static void
3332 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3333 {
3334 	EXT_LOOPBACK_REQ	plbreq;
3335 	EXT_LOOPBACK_RSP	plbrsp;
3336 	ql_mbx_data_t		mr;
3337 	uint32_t		rval;
3338 	caddr_t			bp;
3339 
3340 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3341 
3342 	/* Get loop back request. */
3343 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3344 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3345 		EL(ha, "failed, ddi_copyin\n");
3346 		cmd->Status = EXT_STATUS_COPY_ERR;
3347 		cmd->ResponseLen = 0;
3348 		return;
3349 	}
3350 
3351 	/* Check transfer length fits in buffer. */
3352 	if (plbreq.BufferLength < plbreq.TransferCount &&
3353 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3354 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3355 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3356 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3357 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3358 		cmd->ResponseLen = 0;
3359 		return;
3360 	}
3361 
3362 	/* Allocate command memory. */
3363 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3364 	if (bp == NULL) {
3365 		EL(ha, "failed, kmem_zalloc\n");
3366 		cmd->Status = EXT_STATUS_NO_MEMORY;
3367 		cmd->ResponseLen = 0;
3368 		return;
3369 	}
3370 
3371 	/* Get loopback data. */
3372 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3373 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3374 		EL(ha, "failed, ddi_copyin-2\n");
3375 		kmem_free(bp, plbreq.TransferCount);
3376 		cmd->Status = EXT_STATUS_COPY_ERR;
3377 		cmd->ResponseLen = 0;
3378 		return;
3379 	}
3380 
3381 	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
3382 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3383 		EL(ha, "failed, LOOP_NOT_READY\n");
3384 		kmem_free(bp, plbreq.TransferCount);
3385 		cmd->Status = EXT_STATUS_BUSY;
3386 		cmd->ResponseLen = 0;
3387 		return;
3388 	}
3389 
3390 	/* Shutdown IP. */
3391 	if (ha->flags & IP_INITIALIZED) {
3392 		(void) ql_shutdown_ip(ha);
3393 	}
3394 
	/*
	 * Determine the topology so we can send either the loopback or the
	 * echo; echo is supported only on 2300 and later adapters.
	 */
3397 
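	/*
	 * plbrsp.CommandSent records which diagnostic was used so the
	 * error counters reported below can be interpreted accordingly
	 * (they are reported as zero for the echo path).
	 */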
3398 	if (!(ha->task_daemon_flags & LOOP_DOWN) &&
3399 	    (ha->topology & QL_F_PORT) &&
3400 	    ha->device_id >= 0x2300) {
3401 		QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using echo\n",
3402 		    ha->instance);
3403 		plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3404 		rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
3405 		    (uint16_t)(CFG_IST(ha, CFG_CTRL_81XX) ? BIT_15 : BIT_6),
3406 		    &mr);
3407 	} else {
3408 		plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3409 		rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
3410 		    plbreq.Options, plbreq.IterationCount, &mr);
3411 	}
3412 
3413 	ql_restart_driver(ha);
3414 
3415 	/* Restart IP if it was shutdown. */
3416 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3417 		(void) ql_initialize_ip(ha);
3418 		ql_isp_rcvbuf(ha);
3419 	}
3420 
3421 	if (rval != QL_SUCCESS) {
3422 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3423 		kmem_free(bp, plbreq.TransferCount);
3424 		cmd->Status = EXT_STATUS_MAILBOX;
3425 		cmd->DetailStatus = rval;
3426 		cmd->ResponseLen = 0;
3427 		return;
3428 	}
3429 
3430 	/* Return loopback data. */
3431 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3432 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3433 		EL(ha, "failed, ddi_copyout\n");
3434 		kmem_free(bp, plbreq.TransferCount);
3435 		cmd->Status = EXT_STATUS_COPY_ERR;
3436 		cmd->ResponseLen = 0;
3437 		return;
3438 	}
3439 	kmem_free(bp, plbreq.TransferCount);
3440 
3441 	/* Return loopback results. */
3442 	plbrsp.BufferAddress = plbreq.BufferAddress;
3443 	plbrsp.BufferLength = plbreq.TransferCount;
3444 	plbrsp.CompletionStatus = mr.mb[0];
3445 
3446 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3447 		plbrsp.CrcErrorCount = 0;
3448 		plbrsp.DisparityErrorCount = 0;
3449 		plbrsp.FrameLengthErrorCount = 0;
3450 		plbrsp.IterationCountLastError = 0;
3451 	} else {
3452 		plbrsp.CrcErrorCount = mr.mb[1];
3453 		plbrsp.DisparityErrorCount = mr.mb[2];
3454 		plbrsp.FrameLengthErrorCount = mr.mb[3];
		plbrsp.IterationCountLastError =
		    ((uint32_t)mr.mb[19] << 16) | mr.mb[18];
3456 	}
3457 
3458 	rval = ddi_copyout((void *)&plbrsp,
3459 	    (void *)(uintptr_t)cmd->ResponseAdr,
3460 	    sizeof (EXT_LOOPBACK_RSP), mode);
3461 	if (rval != 0) {
3462 		EL(ha, "failed, ddi_copyout-2\n");
3463 		cmd->Status = EXT_STATUS_COPY_ERR;
3464 		cmd->ResponseLen = 0;
3465 		return;
3466 	}
3467 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3468 
3469 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3470 }
3471 
3472 /*
3473  * ql_send_els_rnid
3474  *	IOCTL for extended link service RNID command.
3475  *
3476  * Input:
3477  *	ha:	adapter state pointer.
3478  *	cmd:	User space CT arguments pointer.
3479  *	mode:	flags.
3480  *
3481  * Returns:
3482  *	None, request status indicated in cmd->Status.
3483  *
3484  * Context:
3485  *	Kernel context.
3486  */
3487 static void
3488 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3489 {
3490 	EXT_RNID_REQ	tmp_rnid;
3491 	port_id_t	tmp_fcid;
3492 	caddr_t		tmp_buf, bptr;
3493 	uint32_t	copy_len;
3494 	ql_tgt_t	*tq;
3495 	EXT_RNID_DATA	rnid_data;
3496 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3497 	int		rval = 0;
3498 	uint32_t	local_hba = 0;
3499 
3500 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3501 
3502 	if (DRIVER_SUSPENDED(ha)) {
3503 		EL(ha, "failed, LOOP_NOT_READY\n");
3504 		cmd->Status = EXT_STATUS_BUSY;
3505 		cmd->ResponseLen = 0;
3506 		return;
3507 	}
3508 
3509 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3510 		/* parameter error */
3511 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3512 		    cmd->RequestLen);
3513 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3514 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3515 		cmd->ResponseLen = 0;
3516 		return;
3517 	}
3518 
3519 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3520 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3521 		EL(ha, "failed, ddi_copyin\n");
3522 		cmd->Status = EXT_STATUS_COPY_ERR;
3523 		cmd->ResponseLen = 0;
3524 		return;
3525 	}
3526 
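	/*
	 * If the supplied address matches this HBA, the RNID data is
	 * fetched locally via ql_get_rnid_params(); otherwise the port
	 * database is searched so an RNID ELS can be sent to the remote
	 * port.
	 */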
3527 	/* Find loop ID of the device */
3528 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3529 		bptr = CFG_IST(ha, CFG_CTRL_242581) ?
3530 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3531 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3532 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3533 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3534 			local_hba = 1;
3535 		} else {
3536 			tq = ql_find_port(ha,
3537 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3538 		}
3539 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3540 		bptr = CFG_IST(ha, CFG_CTRL_242581) ?
3541 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3542 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3543 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3544 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3545 			local_hba = 1;
3546 		} else {
3547 			tq = ql_find_port(ha,
3548 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3549 		}
3550 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3551 		/*
3552 		 * Copy caller's d_id to tmp space.
3553 		 */
3554 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3555 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3556 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3557 
3558 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3559 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3560 			local_hba = 1;
3561 		} else {
3562 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3563 			    QLNT_PID);
3564 		}
3565 	}
3566 
3567 	/* Allocate memory for command. */
3568 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3569 	if (tmp_buf == NULL) {
3570 		EL(ha, "failed, kmem_zalloc\n");
3571 		cmd->Status = EXT_STATUS_NO_MEMORY;
3572 		cmd->ResponseLen = 0;
3573 		return;
3574 	}
3575 
3576 	if (local_hba) {
3577 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3578 		if (rval != QL_SUCCESS) {
3579 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3580 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3581 			cmd->Status = EXT_STATUS_ERR;
3582 			cmd->ResponseLen = 0;
3583 			return;
3584 		}
3585 
3586 		/* Save gotten RNID data. */
3587 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3588 
3589 		/* Now build the Send RNID response */
3590 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3591 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3592 		tmp_buf[2] = 0;
3593 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3594 
3595 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3596 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3597 			    EXT_DEF_WWN_NAME_SIZE);
3598 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3599 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3600 			    EXT_DEF_WWN_NAME_SIZE);
3601 		} else {
3602 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3603 			    EXT_DEF_WWN_NAME_SIZE);
3604 			bcopy(ha->init_ctrl_blk.cb.node_name,
3605 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3606 			    EXT_DEF_WWN_NAME_SIZE);
3607 		}
3608 
3609 		bcopy((uint8_t *)&rnid_data,
3610 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3611 		    sizeof (EXT_RNID_DATA));
3612 	} else {
3613 		if (tq == NULL) {
3614 			/* no matching device */
3615 			EL(ha, "failed, device not found\n");
3616 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3617 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3618 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3619 			cmd->ResponseLen = 0;
3620 			return;
3621 		}
3622 
3623 		/* Send command */
3624 		rval = ql_send_rnid_els(ha, tq->loop_id,
3625 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3626 		if (rval != QL_SUCCESS) {
3627 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3628 			    rval, tq->loop_id);
			while (LOOP_NOT_READY(ha)) {
				ql_delay(ha, 100000);
				if (loop_ready_wait-- == 0) {
					EL(ha, "failed, loop not ready\n");
					kmem_free(tmp_buf,
					    SEND_RNID_RSP_SIZE);
					cmd->Status = EXT_STATUS_ERR;
					cmd->ResponseLen = 0;
					return;
				}
			}
3637 			rval = ql_send_rnid_els(ha, tq->loop_id,
3638 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3639 			    tmp_buf);
3640 			if (rval != QL_SUCCESS) {
3641 				/* error */
3642 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3643 				    rval, tq->loop_id);
3644 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3645 				cmd->Status = EXT_STATUS_ERR;
3646 				cmd->ResponseLen = 0;
3647 				return;
3648 			}
3649 		}
3650 	}
3651 
3652 	/* Copy the response */
3653 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3654 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3655 
3656 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3657 	    copy_len, mode) != copy_len) {
3658 		cmd->Status = EXT_STATUS_COPY_ERR;
3659 		EL(ha, "failed, ddi_copyout\n");
3660 	} else {
3661 		cmd->ResponseLen = copy_len;
3662 		if (copy_len < SEND_RNID_RSP_SIZE) {
3663 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3664 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3665 
3666 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3667 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3668 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3669 		} else {
3670 			cmd->Status = EXT_STATUS_OK;
3671 			QL_PRINT_9(CE_CONT, "(%d): done\n",
3672 			    ha->instance);
3673 		}
3674 	}
3675 
3676 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3677 }
3678 
3679 /*
3680  * ql_set_host_data
3681  *	Process IOCTL subcommand to set host/adapter related data.
3682  *
3683  * Input:
3684  *	ha:	adapter state pointer.
3685  *	cmd:	User space CT arguments pointer.
3686  *	mode:	flags.
3687  *
3688  * Returns:
3689  *	None, request status indicated in cmd->Status.
3690  *
3691  * Context:
3692  *	Kernel context.
3693  */
3694 static void
3695 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3696 {
3697 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3698 	    cmd->SubCode);
3699 
3700 	/*
	 * Dispatch on the command subcode.
3702 	 */
3703 	switch (cmd->SubCode) {
3704 	case EXT_SC_SET_RNID:
3705 		ql_set_rnid_parameters(ha, cmd, mode);
3706 		break;
3707 	case EXT_SC_RST_STATISTICS:
3708 		(void) ql_reset_statistics(ha, cmd);
3709 		break;
3710 	case EXT_SC_SET_BEACON_STATE:
3711 		ql_set_led_state(ha, cmd, mode);
3712 		break;
3713 	case EXT_SC_SET_PARMS:
3714 	case EXT_SC_SET_BUS_MODE:
3715 	case EXT_SC_SET_DR_DUMP_BUF:
3716 	case EXT_SC_SET_RISC_CODE:
3717 	case EXT_SC_SET_FLASH_RAM:
3718 	case EXT_SC_SET_LUN_BITMASK:
3719 	case EXT_SC_SET_RETRY_CNT:
3720 	case EXT_SC_SET_RTIN:
3721 	case EXT_SC_SET_FC_LUN_BITMASK:
3722 	case EXT_SC_ADD_TARGET_DEVICE:
3723 	case EXT_SC_SWAP_TARGET_DEVICE:
3724 	case EXT_SC_SET_SEL_TIMEOUT:
3725 	default:
3726 		/* function not supported. */
3727 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3728 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3729 		break;
3730 	}
3731 
3732 	if (cmd->Status != EXT_STATUS_OK) {
3733 		EL(ha, "failed, Status=%d\n", cmd->Status);
3734 	} else {
3735 		/*EMPTY*/
3736 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3737 	}
3738 }
3739 
3740 /*
3741  * ql_get_host_data
3742  *	Performs EXT_CC_GET_DATA subcommands.
3743  *
3744  * Input:
3745  *	ha:	adapter state pointer.
3746  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3747  *	mode:	flags.
3748  *
3749  * Returns:
3750  *	None, request status indicated in cmd->Status.
3751  *
3752  * Context:
3753  *	Kernel context.
3754  */
3755 static void
3756 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3757 {
3758 	int	out_size = 0;
3759 
3760 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3761 	    cmd->SubCode);
3762 
	/* Dispatch on the command subcode. */
3764 	switch (cmd->SubCode) {
3765 	case EXT_SC_GET_STATISTICS:
3766 		out_size = sizeof (EXT_HBA_PORT_STAT);
3767 		break;
3768 	case EXT_SC_GET_FC_STATISTICS:
3769 		out_size = sizeof (EXT_HBA_PORT_STAT);
3770 		break;
3771 	case EXT_SC_GET_PORT_SUMMARY:
3772 		out_size = sizeof (EXT_DEVICEDATA);
3773 		break;
3774 	case EXT_SC_GET_RNID:
3775 		out_size = sizeof (EXT_RNID_DATA);
3776 		break;
3777 	case EXT_SC_GET_TARGET_ID:
3778 		out_size = sizeof (EXT_DEST_ADDR);
3779 		break;
3780 	case EXT_SC_GET_BEACON_STATE:
3781 		out_size = sizeof (EXT_BEACON_CONTROL);
3782 		break;
3783 	case EXT_SC_GET_FC4_STATISTICS:
3784 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3785 		break;
3786 	case EXT_SC_GET_DCBX_PARAM:
3787 		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3788 		break;
3789 	case EXT_SC_GET_SCSI_ADDR:
3790 	case EXT_SC_GET_ERR_DETECTIONS:
3791 	case EXT_SC_GET_BUS_MODE:
3792 	case EXT_SC_GET_DR_DUMP_BUF:
3793 	case EXT_SC_GET_RISC_CODE:
3794 	case EXT_SC_GET_FLASH_RAM:
3795 	case EXT_SC_GET_LINK_STATUS:
3796 	case EXT_SC_GET_LOOP_ID:
3797 	case EXT_SC_GET_LUN_BITMASK:
3798 	case EXT_SC_GET_PORT_DATABASE:
3799 	case EXT_SC_GET_PORT_DATABASE_MEM:
3800 	case EXT_SC_GET_POSITION_MAP:
3801 	case EXT_SC_GET_RETRY_CNT:
3802 	case EXT_SC_GET_RTIN:
3803 	case EXT_SC_GET_FC_LUN_BITMASK:
3804 	case EXT_SC_GET_SEL_TIMEOUT:
3805 	default:
3806 		/* function not supported. */
3807 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3808 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3809 		cmd->ResponseLen = 0;
3810 		return;
3811 	}
3812 
3813 	if (cmd->ResponseLen < out_size) {
3814 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3815 		cmd->DetailStatus = out_size;
3816 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3817 		    cmd->ResponseLen, out_size);
3818 		cmd->ResponseLen = 0;
3819 		return;
3820 	}
3821 
3822 	switch (cmd->SubCode) {
3823 	case EXT_SC_GET_RNID:
3824 		ql_get_rnid_parameters(ha, cmd, mode);
3825 		break;
3826 	case EXT_SC_GET_STATISTICS:
3827 		ql_get_statistics(ha, cmd, mode);
3828 		break;
3829 	case EXT_SC_GET_FC_STATISTICS:
3830 		ql_get_statistics_fc(ha, cmd, mode);
3831 		break;
3832 	case EXT_SC_GET_FC4_STATISTICS:
3833 		ql_get_statistics_fc4(ha, cmd, mode);
3834 		break;
3835 	case EXT_SC_GET_PORT_SUMMARY:
3836 		ql_get_port_summary(ha, cmd, mode);
3837 		break;
3838 	case EXT_SC_GET_TARGET_ID:
3839 		ql_get_target_id(ha, cmd, mode);
3840 		break;
3841 	case EXT_SC_GET_BEACON_STATE:
3842 		ql_get_led_state(ha, cmd, mode);
3843 		break;
3844 	case EXT_SC_GET_DCBX_PARAM:
3845 		ql_get_dcbx_parameters(ha, cmd, mode);
3846 		break;
3847 	}
3848 
3849 	if (cmd->Status != EXT_STATUS_OK) {
3850 		EL(ha, "failed, Status=%d\n", cmd->Status);
3851 	} else {
3852 		/*EMPTY*/
3853 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3854 	}
3855 }
3856 
3857 /* ******************************************************************** */
3858 /*			Helper Functions				*/
3859 /* ******************************************************************** */
3860 
3861 /*
3862  * ql_lun_count
3863  *	Get numbers of LUNS on target.
3864  *
3865  * Input:
3866  *	ha:	adapter state pointer.
3867  *	q:	device queue pointer.
3868  *
3869  * Returns:
3870  *	Number of LUNs.
3871  *
3872  * Context:
3873  *	Kernel context.
3874  */
3875 static int
3876 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3877 {
3878 	int	cnt;
3879 
3880 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3881 
	/* Try REPORT LUNS first; fall back to an INQUIRY scan. */
3883 	cnt = ql_report_lun(ha, tq);
3884 	if (cnt == 0) {
3885 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3886 	}
3887 
3888 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3889 
3890 	return (cnt);
3891 }
3892 
3893 /*
3894  * ql_report_lun
3895  *	Get numbers of LUNS using report LUN command.
3896  *
3897  * Input:
3898  *	ha:	adapter state pointer.
3899  *	q:	target queue pointer.
3900  *
3901  * Returns:
3902  *	Number of LUNs.
3903  *
3904  * Context:
3905  *	Kernel context.
3906  */
3907 static int
3908 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
3909 {
3910 	int			rval;
3911 	uint8_t			retries;
3912 	ql_mbx_iocb_t		*pkt;
3913 	ql_rpt_lun_lst_t	*rpt;
3914 	dma_mem_t		dma_mem;
3915 	uint32_t		pkt_size, cnt;
3916 	uint16_t		comp_status;
3917 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
3918 
3919 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3920 
3921 	if (DRIVER_SUSPENDED(ha)) {
3922 		EL(ha, "failed, LOOP_NOT_READY\n");
3923 		return (0);
3924 	}
3925 
3926 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
3927 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
3928 	if (pkt == NULL) {
3929 		EL(ha, "failed, kmem_zalloc\n");
3930 		return (0);
3931 	}
3932 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
3933 
3934 	/* Get DMA memory for the IOCB */
3935 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
3936 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
3937 		cmn_err(CE_WARN, "%s(%d): DMA memory "
3938 		    "alloc failed", QL_NAME, ha->instance);
3939 		kmem_free(pkt, pkt_size);
3940 		return (0);
3941 	}
3942 
3943 	for (retries = 0; retries < 4; retries++) {
3944 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3945 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
3946 			pkt->cmd24.entry_count = 1;
3947 
3948 			/* Set N_port handle */
3949 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
3950 
3951 			/* Set target ID */
3952 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
3953 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
3954 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
3955 
3956 			/* Set ISP command timeout. */
3957 			pkt->cmd24.timeout = LE_16(15);
3958 
3959 			/* Load SCSI CDB */
3960 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
3961 			pkt->cmd24.scsi_cdb[6] =
3962 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3963 			pkt->cmd24.scsi_cdb[7] =
3964 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3965 			pkt->cmd24.scsi_cdb[8] =
3966 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
3967 			pkt->cmd24.scsi_cdb[9] =
3968 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
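			/*
			 * The 24xx IOCB carries the CDB as big-endian
			 * 32-bit words, so byte-swap each 4-byte group.
			 */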
3969 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
3970 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
3971 				    + cnt, 4);
3972 			}
3973 
3974 			/* Set tag queue control flags */
3975 			pkt->cmd24.task = TA_STAG;
3976 
3977 			/* Set transfer direction. */
3978 			pkt->cmd24.control_flags = CF_RD;
3979 
3980 			/* Set data segment count. */
3981 			pkt->cmd24.dseg_count = LE_16(1);
3982 
3983 			/* Load total byte count. */
3984 			/* Load data descriptor. */
3985 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
3986 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
3987 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
3988 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
3989 			pkt->cmd24.total_byte_count =
3990 			    LE_32(sizeof (ql_rpt_lun_lst_t));
3991 			pkt->cmd24.dseg_0_length =
3992 			    LE_32(sizeof (ql_rpt_lun_lst_t));
3993 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
3994 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
3995 			pkt->cmd3.entry_count = 1;
3996 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3997 				pkt->cmd3.target_l = LSB(tq->loop_id);
3998 				pkt->cmd3.target_h = MSB(tq->loop_id);
3999 			} else {
4000 				pkt->cmd3.target_h = LSB(tq->loop_id);
4001 			}
4002 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4003 			pkt->cmd3.timeout = LE_16(15);
4004 			pkt->cmd3.dseg_count = LE_16(1);
4005 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4006 			pkt->cmd3.scsi_cdb[6] =
4007 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4008 			pkt->cmd3.scsi_cdb[7] =
4009 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4010 			pkt->cmd3.scsi_cdb[8] =
4011 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4012 			pkt->cmd3.scsi_cdb[9] =
4013 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4014 			pkt->cmd3.byte_count =
4015 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4016 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4017 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4018 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4019 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4020 			pkt->cmd3.dseg_0_length =
4021 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4022 		} else {
4023 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4024 			pkt->cmd.entry_count = 1;
4025 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4026 				pkt->cmd.target_l = LSB(tq->loop_id);
4027 				pkt->cmd.target_h = MSB(tq->loop_id);
4028 			} else {
4029 				pkt->cmd.target_h = LSB(tq->loop_id);
4030 			}
4031 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4032 			pkt->cmd.timeout = LE_16(15);
4033 			pkt->cmd.dseg_count = LE_16(1);
4034 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4035 			pkt->cmd.scsi_cdb[6] =
4036 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4037 			pkt->cmd.scsi_cdb[7] =
4038 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4039 			pkt->cmd.scsi_cdb[8] =
4040 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4041 			pkt->cmd.scsi_cdb[9] =
4042 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4043 			pkt->cmd.byte_count =
4044 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4045 			pkt->cmd.dseg_0_address = (uint32_t)
4046 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4047 			pkt->cmd.dseg_0_length =
4048 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4049 		}
4050 
4051 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4052 		    sizeof (ql_mbx_iocb_t));
4053 
		/* Sync the incoming DMA buffer. */
4055 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4056 		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy the incoming DMA data. */
4058 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4059 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4060 
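		/*
		 * Mask the IOCB entry status down to its error bits
		 * (the bit layout differs between 2xxx and 24xx status
		 * entries); a non-zero result is treated as an error.
		 */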
4061 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4062 			pkt->sts24.entry_status = (uint8_t)
4063 			    (pkt->sts24.entry_status & 0x3c);
4064 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4065 			scsi_status_h = pkt->sts24.scsi_status_h;
4066 			scsi_status_l = pkt->sts24.scsi_status_l;
4067 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4068 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4069 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4070 		} else {
4071 			pkt->sts.entry_status = (uint8_t)
4072 			    (pkt->sts.entry_status & 0x7e);
4073 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4074 			scsi_status_h = pkt->sts.scsi_status_h;
4075 			scsi_status_l = pkt->sts.scsi_status_l;
4076 			reqs = &pkt->sts.req_sense_data[0];
4077 		}
4078 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4079 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4080 			    pkt->sts.entry_status, tq->d_id.b24);
4081 			rval = QL_FUNCTION_PARAMETER_ERROR;
4082 		}
4083 
4084 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4085 		    scsi_status_l & STATUS_CHECK) {
4086 			/* Device underrun, treat as OK. */
4087 			if (rval == QL_SUCCESS &&
4088 			    comp_status == CS_DATA_UNDERRUN &&
4089 			    scsi_status_h & FCP_RESID_UNDER) {
4090 				break;
4091 			}
4092 
4093 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4094 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4095 			    comp_status, scsi_status_h, scsi_status_l);
4096 
4097 			if (rval == QL_SUCCESS) {
4098 				if ((comp_status == CS_TIMEOUT) ||
4099 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4100 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4101 					rval = QL_FUNCTION_TIMEOUT;
4102 					break;
4103 				}
4104 				rval = QL_FUNCTION_FAILED;
4105 			} else if (rval == QL_ABORTED) {
4106 				break;
4107 			}
4108 
4109 			if (scsi_status_l & STATUS_CHECK) {
4110 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4111 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4112 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4113 				    reqs[1], reqs[2], reqs[3], reqs[4],
4114 				    reqs[5], reqs[6], reqs[7], reqs[8],
4115 				    reqs[9], reqs[10], reqs[11], reqs[12],
4116 				    reqs[13], reqs[14], reqs[15], reqs[16],
4117 				    reqs[17]);
4118 			}
4119 		} else {
4120 			break;
4121 		}
4122 		bzero((caddr_t)pkt, pkt_size);
4123 	}
4124 
4125 	if (rval != QL_SUCCESS) {
4126 		EL(ha, "failed=%xh\n", rval);
4127 		rval = 0;
4128 	} else {
4129 		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
4130 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
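		/*
		 * The REPORT LUNS header length is big-endian and counts
		 * bytes of LUN list data; each LUN entry is 8 bytes.
		 */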
4131 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4132 	}
4133 
4134 	kmem_free(pkt, pkt_size);
4135 	ql_free_dma_resource(ha, &dma_mem);
4136 
4137 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4138 
4139 	return (rval);
4140 }
4141 
4142 /*
4143  * ql_inq_scan
4144  *	Get numbers of LUNS using inquiry command.
4145  *
4146  * Input:
4147  *	ha:		adapter state pointer.
4148  *	tq:		target queue pointer.
4149  *	count:		scan for the number of existing LUNs.
4150  *
4151  * Returns:
4152  *	Number of LUNs.
4153  *
4154  * Context:
4155  *	Kernel context.
4156  */
4157 static int
4158 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4159 {
4160 	int		lun, cnt, rval;
4161 	ql_mbx_iocb_t	*pkt;
4162 	uint8_t		*inq;
4163 	uint32_t	pkt_size;
4164 
4165 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4166 
4167 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4168 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4169 	if (pkt == NULL) {
4170 		EL(ha, "failed, kmem_zalloc\n");
4171 		return (0);
4172 	}
4173 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4174 
4175 	cnt = 0;
4176 	for (lun = 0; lun < MAX_LUNS; lun++) {
4177 
4178 		if (DRIVER_SUSPENDED(ha)) {
4179 			rval = QL_LOOP_DOWN;
4180 			cnt = 0;
4181 			break;
4182 		}
4183 
4184 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4185 		if (rval == QL_SUCCESS) {
4186 			switch (*inq) {
4187 			case DTYPE_DIRECT:
4188 			case DTYPE_PROCESSOR:	/* Appliance. */
4189 			case DTYPE_WORM:
4190 			case DTYPE_RODIRECT:
4191 			case DTYPE_SCANNER:
4192 			case DTYPE_OPTICAL:
4193 			case DTYPE_CHANGER:
4194 			case DTYPE_ESI:
4195 				cnt++;
4196 				break;
4197 			case DTYPE_SEQUENTIAL:
4198 				cnt++;
4199 				tq->flags |= TQF_TAPE_DEVICE;
4200 				break;
4201 			default:
4202 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4203 				    "unsupported device id=%xh, lun=%d, "
4204 				    "type=%xh\n", ha->instance, tq->loop_id,
4205 				    lun, *inq);
4206 				break;
4207 			}
4208 
4209 			if (*inq == DTYPE_ESI || cnt >= count) {
4210 				break;
4211 			}
4212 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4213 			cnt = 0;
4214 			break;
4215 		}
4216 	}
4217 
4218 	kmem_free(pkt, pkt_size);
4219 
4220 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4221 
4222 	return (cnt);
4223 }
4224 
4225 /*
4226  * ql_inq
4227  *	Issue inquiry command.
4228  *
4229  * Input:
4230  *	ha:		adapter state pointer.
4231  *	tq:		target queue pointer.
4232  *	lun:		LUN number.
4233  *	pkt:		command and buffer pointer.
4234  *	inq_len:	amount of inquiry data.
4235  *
4236  * Returns:
4237  *	ql local function return status code.
4238  *
4239  * Context:
4240  *	Kernel context.
4241  */
4242 static int
4243 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
4244     uint8_t inq_len)
4245 {
4246 	dma_mem_t	dma_mem;
4247 	int		rval, retries;
4248 	uint32_t	pkt_size, cnt;
4249 	uint16_t	comp_status;
4250 	uint8_t		scsi_status_h, scsi_status_l, *reqs;
4251 	caddr_t		inq_data;
4252 
4253 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4254 
4255 	if (DRIVER_SUSPENDED(ha)) {
4256 		EL(ha, "failed, loop down\n");
4257 		return (QL_FUNCTION_TIMEOUT);
4258 	}
4259 
4260 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
4261 	bzero((caddr_t)pkt, pkt_size);
4262 
4263 	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
4264 
4265 	/* Get DMA memory for the IOCB */
4266 	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
4267 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4268 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4269 		    "alloc failed", QL_NAME, ha->instance);
		return (QL_MEMORY_ALLOC_FAILED);
4271 	}
4272 
4273 	for (retries = 0; retries < 4; retries++) {
4274 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4275 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4276 			pkt->cmd24.entry_count = 1;
4277 
4278 			/* Set LUN number */
4279 			pkt->cmd24.fcp_lun[2] = LSB(lun);
4280 			pkt->cmd24.fcp_lun[3] = MSB(lun);
4281 
4282 			/* Set N_port handle */
4283 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4284 
4285 			/* Set target ID */
4286 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4287 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4288 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4289 
4290 			/* Set ISP command timeout. */
4291 			pkt->cmd24.timeout = LE_16(15);
4292 
4293 			/* Load SCSI CDB */
4294 			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
4295 			pkt->cmd24.scsi_cdb[4] = inq_len;
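			/*
			 * As with REPORT LUNS above, the 24xx IOCB
			 * carries the CDB as big-endian 32-bit words.
			 */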
4296 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4297 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4298 				    + cnt, 4);
4299 			}
4300 
4301 			/* Set tag queue control flags */
4302 			pkt->cmd24.task = TA_STAG;
4303 
4304 			/* Set transfer direction. */
4305 			pkt->cmd24.control_flags = CF_RD;
4306 
4307 			/* Set data segment count. */
4308 			pkt->cmd24.dseg_count = LE_16(1);
4309 
4310 			/* Load total byte count. */
4311 			pkt->cmd24.total_byte_count = LE_32(inq_len);
4312 
4313 			/* Load data descriptor. */
4314 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4315 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4316 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4317 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4318 			pkt->cmd24.dseg_0_length = LE_32(inq_len);
4319 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4320 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4321 			cnt = CMD_TYPE_3_DATA_SEGMENTS;
4322 
4323 			pkt->cmd3.entry_count = 1;
4324 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4325 				pkt->cmd3.target_l = LSB(tq->loop_id);
4326 				pkt->cmd3.target_h = MSB(tq->loop_id);
4327 			} else {
4328 				pkt->cmd3.target_h = LSB(tq->loop_id);
4329 			}
4330 			pkt->cmd3.lun_l = LSB(lun);
4331 			pkt->cmd3.lun_h = MSB(lun);
4332 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4333 			pkt->cmd3.timeout = LE_16(15);
4334 			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
4335 			pkt->cmd3.scsi_cdb[4] = inq_len;
4336 			pkt->cmd3.dseg_count = LE_16(1);
4337 			pkt->cmd3.byte_count = LE_32(inq_len);
4338 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4339 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4340 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4341 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4342 			pkt->cmd3.dseg_0_length = LE_32(inq_len);
4343 		} else {
4344 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4345 			cnt = CMD_TYPE_2_DATA_SEGMENTS;
4346 
4347 			pkt->cmd.entry_count = 1;
4348 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4349 				pkt->cmd.target_l = LSB(tq->loop_id);
4350 				pkt->cmd.target_h = MSB(tq->loop_id);
4351 			} else {
4352 				pkt->cmd.target_h = LSB(tq->loop_id);
4353 			}
4354 			pkt->cmd.lun_l = LSB(lun);
4355 			pkt->cmd.lun_h = MSB(lun);
4356 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4357 			pkt->cmd.timeout = LE_16(15);
4358 			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
4359 			pkt->cmd.scsi_cdb[4] = inq_len;
4360 			pkt->cmd.dseg_count = LE_16(1);
4361 			pkt->cmd.byte_count = LE_32(inq_len);
4362 			pkt->cmd.dseg_0_address = (uint32_t)
4363 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4364 			pkt->cmd.dseg_0_length = LE_32(inq_len);
4365 		}
4366 
4367 /*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
4368 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4369 		    sizeof (ql_mbx_iocb_t));
4370 
		/* Sync the incoming IOCB DMA buffer. */
4372 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4373 		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy the incoming DMA data. */
4375 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
4376 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4377 
4378 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4379 			pkt->sts24.entry_status = (uint8_t)
4380 			    (pkt->sts24.entry_status & 0x3c);
4381 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4382 			scsi_status_h = pkt->sts24.scsi_status_h;
4383 			scsi_status_l = pkt->sts24.scsi_status_l;
4384 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4385 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4386 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4387 		} else {
4388 			pkt->sts.entry_status = (uint8_t)
4389 			    (pkt->sts.entry_status & 0x7e);
4390 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4391 			scsi_status_h = pkt->sts.scsi_status_h;
4392 			scsi_status_l = pkt->sts.scsi_status_l;
4393 			reqs = &pkt->sts.req_sense_data[0];
4394 		}
4395 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4396 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4397 			    pkt->sts.entry_status, tq->d_id.b24);
4398 			rval = QL_FUNCTION_PARAMETER_ERROR;
4399 		}
4400 
4401 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4402 		    scsi_status_l & STATUS_CHECK) {
4403 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4404 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4405 			    comp_status, scsi_status_h, scsi_status_l);
4406 
4407 			if (rval == QL_SUCCESS) {
4408 				if ((comp_status == CS_TIMEOUT) ||
4409 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4410 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4411 					rval = QL_FUNCTION_TIMEOUT;
4412 					break;
4413 				}
4414 				rval = QL_FUNCTION_FAILED;
4415 			}
4416 
4417 			if (scsi_status_l & STATUS_CHECK) {
4418 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4419 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4420 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4421 				    reqs[1], reqs[2], reqs[3], reqs[4],
4422 				    reqs[5], reqs[6], reqs[7], reqs[8],
4423 				    reqs[9], reqs[10], reqs[11], reqs[12],
4424 				    reqs[13], reqs[14], reqs[15], reqs[16],
4425 				    reqs[17]);
4426 			}
4427 		} else {
4428 			break;
4429 		}
4430 	}
4431 	ql_free_dma_resource(ha, &dma_mem);
4432 
4433 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4434 
4435 	return (rval);
4436 }
4437 
4438 /*
4439  * ql_get_buffer_data
4440  *	Copies data from user space to kernal buffer.
4441  *
4442  * Input:
4443  *	src:	User source buffer address.
4444  *	dst:	Kernal destination buffer address.
4445  *	size:	Amount of data.
4446  *	mode:	flags.
4447  *
4448  * Returns:
4449  *	Returns number of bytes transferred.
4450  *
4451  * Context:
4452  *	Kernel context.
4453  */
4454 static uint32_t
4455 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4456 {
4457 	uint32_t	cnt;
4458 
4459 	for (cnt = 0; cnt < size; cnt++) {
4460 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4461 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4462 			break;
4463 		}
4464 	}
4465 
4466 	return (cnt);
4467 }
4468 
4469 /*
4470  * ql_send_buffer_data
4471  *	Copies data from kernal buffer to user space.
4472  *
4473  * Input:
4474  *	src:	Kernal source buffer address.
4475  *	dst:	User destination buffer address.
4476  *	size:	Amount of data.
4477  *	mode:	flags.
4478  *
4479  * Returns:
4480  *	Returns number of bytes transferred.
4481  *
4482  * Context:
4483  *	Kernel context.
4484  */
4485 static uint32_t
4486 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4487 {
4488 	uint32_t	cnt;
4489 
4490 	for (cnt = 0; cnt < size; cnt++) {
4491 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
			QL_PRINT_2(CE_CONT, "failed, ddi_copyout\n");
4493 			break;
4494 		}
4495 	}
4496 
4497 	return (cnt);
4498 }
4499 
4500 /*
4501  * ql_find_port
4502  *	Locates device queue.
4503  *
4504  * Input:
4505  *	ha:	adapter state pointer.
4506  *	name:	device port name.
4507  *
4508  * Returns:
4509  *	Returns target queue pointer.
4510  *
4511  * Context:
4512  *	Kernel context.
4513  */
4514 static ql_tgt_t *
4515 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4516 {
4517 	ql_link_t	*link;
4518 	ql_tgt_t	*tq;
4519 	uint16_t	index;
4520 
4521 	/* Scan port list for requested target */
4522 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4523 		for (link = ha->dev[index].first; link != NULL;
4524 		    link = link->next) {
4525 			tq = link->base_address;
4526 
4527 			switch (type) {
4528 			case QLNT_LOOP_ID:
4529 				if (bcmp(name, &tq->loop_id,
4530 				    sizeof (uint16_t)) == 0) {
4531 					return (tq);
4532 				}
4533 				break;
4534 			case QLNT_PORT:
4535 				if (bcmp(name, tq->port_name, 8) == 0) {
4536 					return (tq);
4537 				}
4538 				break;
4539 			case QLNT_NODE:
4540 				if (bcmp(name, tq->node_name, 8) == 0) {
4541 					return (tq);
4542 				}
4543 				break;
4544 			case QLNT_PID:
4545 				if (bcmp(name, tq->d_id.r.d_id,
4546 				    sizeof (tq->d_id.r.d_id)) == 0) {
4547 					return (tq);
4548 				}
4549 				break;
4550 			default:
				EL(ha, "failed, invalid type=%d\n", type);
4552 				return (NULL);
4553 			}
4554 		}
4555 	}
4556 
4557 	return (NULL);
4558 }
4559 
4560 /*
4561  * ql_24xx_flash_desc
4562  *	Get flash descriptor table.
4563  *
4564  * Input:
4565  *	ha:		adapter state pointer.
4566  *
4567  * Returns:
4568  *	ql local function return status code.
4569  *
4570  * Context:
4571  *	Kernel context.
4572  */
4573 static int
4574 ql_24xx_flash_desc(ql_adapter_state_t *ha)
4575 {
4576 	uint32_t	cnt;
4577 	uint16_t	chksum, *bp, data;
4578 	int		rval;
4579 	flash_desc_t	*fdesc;
4580 	ql_xioctl_t	*xp = ha->xioctl;
4581 
4582 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4583 
4584 	if (ha->flash_desc_addr == 0) {
4585 		EL(ha, "desc ptr=0\n");
4586 		return (QL_FUNCTION_FAILED);
4587 	}
4588 
4589 	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
4590 		EL(ha, "kmem_zalloc=null\n");
4591 		return (QL_MEMORY_ALLOC_FAILED);
4592 	}
4593 	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
4594 	    ha->flash_desc_addr << 2);
4595 	if (rval != QL_SUCCESS) {
4596 		EL(ha, "read status=%xh\n", rval);
4597 		kmem_free(fdesc, sizeof (flash_desc_t));
4598 		return (rval);
4599 	}
4600 
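	/*
	 * The descriptor table is valid only if the 16-bit little-endian
	 * word sum over the entire table is zero (checked below).
	 */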
4601 	chksum = 0;
4602 	bp = (uint16_t *)fdesc;
4603 	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
4604 		data = *bp++;
4605 		LITTLE_ENDIAN_16(&data);
4606 		chksum += data;
4607 	}
4608 
4609 	LITTLE_ENDIAN_32(&fdesc->flash_valid);
4610 	LITTLE_ENDIAN_16(&fdesc->flash_version);
4611 	LITTLE_ENDIAN_16(&fdesc->flash_len);
4612 	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
4613 	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
4614 	LITTLE_ENDIAN_16(&fdesc->flash_id);
4615 	LITTLE_ENDIAN_32(&fdesc->block_size);
4616 	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
4617 	LITTLE_ENDIAN_32(&fdesc->flash_size);
4618 	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
4619 	LITTLE_ENDIAN_32(&fdesc->read_timeout);
4620 
	/* The flash size in the descriptor table is in units of 1024 bytes. */
4622 	fdesc->flash_size = fdesc->flash_size * 0x400;
4623 
4624 	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
4625 	    fdesc->flash_version != FLASH_DESC_VERSION) {
4626 		EL(ha, "invalid descriptor table\n");
4627 		kmem_free(fdesc, sizeof (flash_desc_t));
4628 		return (QL_FUNCTION_FAILED);
4629 	}
4630 
4631 	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
4632 	kmem_free(fdesc, sizeof (flash_desc_t));
4633 
4634 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4635 
4636 	return (QL_SUCCESS);
4637 }
4638 
4639 /*
4640  * ql_setup_flash
4641  *	Gets the manufacturer and id number of the flash chip, and
4642  *	sets up the size parameter.
4643  *
4644  * Input:
4645  *	ha:	adapter state pointer.
4646  *
4647  * Returns:
4648  *	int:	ql local function return status code.
4649  *
4650  * Context:
4651  *	Kernel context.
4652  */
4653 static int
4654 ql_setup_flash(ql_adapter_state_t *ha)
4655 {
4656 	ql_xioctl_t	*xp = ha->xioctl;
4657 	int		rval = QL_SUCCESS;
4658 
4659 	if (xp->fdesc.flash_size != 0) {
4660 		return (rval);
4661 	}
4662 
4663 	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
4664 		return (QL_FUNCTION_FAILED);
4665 	}
4666 
4667 	if (CFG_IST(ha, CFG_CTRL_2581)) {
4668 		/*
		 * Temporarily set ha->xioctl->fdesc.flash_size to the
		 * default flash size for this chip so that
		 * ql_dump_fcode() does not fail.
4671 		 */
4672 		ha->xioctl->fdesc.flash_size = CFG_IST(ha, CFG_CTRL_25XX) ?
4673 		    0x200000 : 0x400000;
4674 		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
4675 			EL(ha, "flash desc table ok, exit\n");
4676 			return (rval);
4677 		}
4678 		(void) ql_24xx_flash_id(ha);
4679 
4680 	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
4681 		(void) ql_24xx_flash_id(ha);
4682 	} else {
4683 		ql_flash_enable(ha);
4684 
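		/*
		 * Software ID (autoselect) command sequence; the
		 * manufacturer code is then read from address 0.
		 */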
4685 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4686 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4687 		ql_write_flash_byte(ha, 0x5555, 0x90);
4688 		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);
4689 
4690 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
4691 			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
4692 			ql_write_flash_byte(ha, 0x5555, 0x55);
4693 			ql_write_flash_byte(ha, 0xaaaa, 0x90);
4694 			xp->fdesc.flash_id = (uint16_t)
4695 			    ql_read_flash_byte(ha, 0x0002);
4696 		} else {
4697 			ql_write_flash_byte(ha, 0x5555, 0xaa);
4698 			ql_write_flash_byte(ha, 0x2aaa, 0x55);
4699 			ql_write_flash_byte(ha, 0x5555, 0x90);
4700 			xp->fdesc.flash_id = (uint16_t)
4701 			    ql_read_flash_byte(ha, 0x0001);
4702 		}
4703 
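		/* Exit software ID mode; return the part to read mode. */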
4704 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4705 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4706 		ql_write_flash_byte(ha, 0x5555, 0xf0);
4707 
4708 		ql_flash_disable(ha);
4709 	}
4710 
4711 	/* Default flash descriptor table. */
4712 	xp->fdesc.write_statusreg_cmd = 1;
4713 	xp->fdesc.write_enable_bits = 0;
4714 	xp->fdesc.unprotect_sector_cmd = 0;
4715 	xp->fdesc.protect_sector_cmd = 0;
4716 	xp->fdesc.write_disable_bits = 0x9c;
4717 	xp->fdesc.block_size = 0x10000;
4718 	xp->fdesc.erase_cmd = 0xd8;
4719 
4720 	switch (xp->fdesc.flash_manuf) {
4721 	case AMD_FLASH:
4722 		switch (xp->fdesc.flash_id) {
4723 		case SPAN_FLASHID_2048K:
4724 			xp->fdesc.flash_size = 0x200000;
4725 			break;
4726 		case AMD_FLASHID_1024K:
4727 			xp->fdesc.flash_size = 0x100000;
4728 			break;
4729 		case AMD_FLASHID_512K:
4730 		case AMD_FLASHID_512Kt:
4731 		case AMD_FLASHID_512Kb:
4732 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
4733 				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
4734 			} else {
4735 				xp->fdesc.flash_size = 0x80000;
4736 			}
4737 			break;
4738 		case AMD_FLASHID_128K:
4739 			xp->fdesc.flash_size = 0x20000;
4740 			break;
4741 		default:
4742 			rval = QL_FUNCTION_FAILED;
4743 			break;
4744 		}
4745 		break;
4746 	case ST_FLASH:
4747 		switch (xp->fdesc.flash_id) {
4748 		case ST_FLASHID_128K:
4749 			xp->fdesc.flash_size = 0x20000;
4750 			break;
4751 		case ST_FLASHID_512K:
4752 			xp->fdesc.flash_size = 0x80000;
4753 			break;
4754 		case ST_FLASHID_M25PXX:
4755 			if (xp->fdesc.flash_len == 0x14) {
4756 				xp->fdesc.flash_size = 0x100000;
4757 			} else if (xp->fdesc.flash_len == 0x15) {
4758 				xp->fdesc.flash_size = 0x200000;
4759 			} else {
4760 				rval = QL_FUNCTION_FAILED;
4761 			}
4762 			break;
4763 		default:
4764 			rval = QL_FUNCTION_FAILED;
4765 			break;
4766 		}
4767 		break;
4768 	case SST_FLASH:
4769 		switch (xp->fdesc.flash_id) {
4770 		case SST_FLASHID_128K:
4771 			xp->fdesc.flash_size = 0x20000;
4772 			break;
4773 		case SST_FLASHID_1024K_A:
4774 			xp->fdesc.flash_size = 0x100000;
4775 			xp->fdesc.block_size = 0x8000;
4776 			xp->fdesc.erase_cmd = 0x52;
4777 			break;
4778 		case SST_FLASHID_1024K:
4779 		case SST_FLASHID_1024K_B:
4780 			xp->fdesc.flash_size = 0x100000;
4781 			break;
4782 		case SST_FLASHID_2048K:
4783 			xp->fdesc.flash_size = 0x200000;
4784 			break;
4785 		default:
4786 			rval = QL_FUNCTION_FAILED;
4787 			break;
4788 		}
4789 		break;
4790 	case MXIC_FLASH:
4791 		switch (xp->fdesc.flash_id) {
4792 		case MXIC_FLASHID_512K:
4793 			xp->fdesc.flash_size = 0x80000;
4794 			break;
4795 		case MXIC_FLASHID_1024K:
4796 			xp->fdesc.flash_size = 0x100000;
4797 			break;
4798 		case MXIC_FLASHID_25LXX:
4799 			if (xp->fdesc.flash_len == 0x14) {
4800 				xp->fdesc.flash_size = 0x100000;
4801 			} else if (xp->fdesc.flash_len == 0x15) {
4802 				xp->fdesc.flash_size = 0x200000;
4803 			} else {
4804 				rval = QL_FUNCTION_FAILED;
4805 			}
4806 			break;
4807 		default:
4808 			rval = QL_FUNCTION_FAILED;
4809 			break;
4810 		}
4811 		break;
4812 	case ATMEL_FLASH:
4813 		switch (xp->fdesc.flash_id) {
4814 		case ATMEL_FLASHID_1024K:
4815 			xp->fdesc.flash_size = 0x100000;
4816 			xp->fdesc.write_disable_bits = 0xbc;
4817 			xp->fdesc.unprotect_sector_cmd = 0x39;
4818 			xp->fdesc.protect_sector_cmd = 0x36;
4819 			break;
4820 		default:
4821 			rval = QL_FUNCTION_FAILED;
4822 			break;
4823 		}
4824 		break;
4825 	case WINBOND_FLASH:
4826 		switch (xp->fdesc.flash_id) {
4827 		case WINBOND_FLASHID:
4828 			if (xp->fdesc.flash_len == 0x15) {
4829 				xp->fdesc.flash_size = 0x200000;
4830 			} else if (xp->fdesc.flash_len == 0x16) {
4831 				xp->fdesc.flash_size = 0x400000;
4832 			} else if (xp->fdesc.flash_len == 0x17) {
4833 				xp->fdesc.flash_size = 0x800000;
4834 			} else {
4835 				rval = QL_FUNCTION_FAILED;
4836 			}
4837 			break;
4838 		default:
4839 			rval = QL_FUNCTION_FAILED;
4840 			break;
4841 		}
4842 		break;
4843 	case INTEL_FLASH:
4844 		switch (xp->fdesc.flash_id) {
4845 		case INTEL_FLASHID:
4846 			if (xp->fdesc.flash_len == 0x11) {
4847 				xp->fdesc.flash_size = 0x200000;
4848 			} else if (xp->fdesc.flash_len == 0x12) {
4849 				xp->fdesc.flash_size = 0x400000;
4850 			} else if (xp->fdesc.flash_len == 0x13) {
4851 				xp->fdesc.flash_size = 0x800000;
4852 			} else {
4853 				rval = QL_FUNCTION_FAILED;
4854 			}
4855 			break;
4856 		default:
4857 			rval = QL_FUNCTION_FAILED;
4858 			break;
4859 		}
4860 		break;
4861 	default:
4862 		rval = QL_FUNCTION_FAILED;
4863 		break;
4864 	}
4865 
	/* No default ID matched; try the flash table later. */
4867 	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_242581)) {
4868 		EL(ha, "no default id\n");
4869 		return (QL_SUCCESS);
4870 	}
4871 
4872 	/*
	 * Hack for non-standard 2312 and 6312 boards. Hardware people need
	 * to use either the original 128k flash chip or something larger.
4875 	 * For driver purposes, we'll treat it as a 128k flash chip.
4876 	 */
4877 	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
4878 	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
4879 	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
4880 		EL(ha, "chip exceeds max size: %xh, using 128k\n",
4881 		    xp->fdesc.flash_size);
4882 		xp->fdesc.flash_size = 0x20000;
4883 	}
4884 
4885 	if (rval == QL_SUCCESS) {
4886 		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
4887 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
4888 		    xp->fdesc.flash_size);
4889 	} else {
4890 		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
4891 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
4892 	}
4893 
4894 	return (rval);
4895 }
4896 
4897 /*
4898  * ql_flash_fcode_load
4899  *	Loads fcode data into flash from application.
4900  *
4901  * Input:
4902  *	ha:	adapter state pointer.
4903  *	bp:	user buffer address.
4904  *	size:	user buffer size.
4905  *	mode:	flags
4906  *
4907  * Returns:
4908  *
4909  * Context:
4910  *	Kernel context.
4911  */
4912 static int
4913 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
4914     int mode)
4915 {
4916 	uint8_t		*bfp;
4917 	ql_xioctl_t	*xp = ha->xioctl;
4918 	int		rval = 0;
4919 
4920 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4921 
4922 	if (bsize > xp->fdesc.flash_size) {
4923 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
4924 		    xp->fdesc.flash_size);
4925 		return (ENOMEM);
4926 	}
4927 
4928 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
4929 		EL(ha, "failed, kmem_zalloc\n");
4930 		rval = ENOMEM;
4931 	} else  {
4932 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
4933 			EL(ha, "failed, ddi_copyin\n");
4934 			rval = EFAULT;
4935 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
4936 			EL(ha, "failed, load_fcode\n");
4937 			rval = EFAULT;
4938 		} else {
4939 			/* Reset caches on all adapter instances. */
4940 			ql_update_flash_caches(ha);
4941 			rval = 0;
4942 		}
4943 		kmem_free(bfp, bsize);
4944 	}
4945 
4946 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4947 
4948 	return (rval);
4949 }
4950 
4951 /*
4952  * ql_load_fcode
4953  *	Loads fcode in to flash.
4954  *
4955  * Input:
4956  *	ha:	adapter state pointer.
4957  *	dp:	data pointer.
4958  *	size:	data length.
4959  *	addr:	flash byte address.
4960  *
4961  * Returns:
4962  *	ql local function return status code.
4963  *
4964  * Context:
4965  *	Kernel context.
4966  */
4967 int
4968 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
4969 {
4970 	uint32_t	cnt;
4971 	int		rval;
4972 
4973 	if (CFG_IST(ha, CFG_CTRL_242581)) {
4974 		return (ql_24xx_load_flash(ha, dp, size, addr));
4975 	}
4976 
4977 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4978 
4979 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
4980 		/*
		 * SBUS cards get an additional check to make
		 * sure a bad image does not brick the HBA.
4983 		 */
4984 		if (dp[0] != 0xf1) {
4985 			EL(ha, "failed, incorrect fcode for sbus\n");
4986 			return (QL_FUNCTION_PARAMETER_ERROR);
4987 		}
4988 	}
4989 
4990 	GLOBAL_HW_LOCK();
4991 
4992 	/* Enable Flash Read/Write. */
4993 	ql_flash_enable(ha);
4994 
4995 	/* Erase flash prior to write. */
4996 	rval = ql_erase_flash(ha, 0);
4997 
4998 	if (rval == QL_SUCCESS) {
4999 		/* Write fcode data to flash. */
5000 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
5001 			/* Allow other system activity. */
5002 			if (cnt % 0x1000 == 0) {
5003 				drv_usecwait(1);
5004 			}
5005 			rval = ql_program_flash_address(ha, addr++, *dp++);
5006 			if (rval != QL_SUCCESS)
5007 				break;
5008 		}
5009 	}
5010 
5011 	ql_flash_disable(ha);
5012 
5013 	GLOBAL_HW_UNLOCK();
5014 
5015 	if (rval != QL_SUCCESS) {
5016 		EL(ha, "failed, rval=%xh\n", rval);
5017 	} else {
5018 		/*EMPTY*/
5019 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5020 	}
5021 	return (rval);
5022 }
5023 
5024 /*
5025  * ql_flash_fcode_dump
5026  *	Dumps FLASH to application.
5027  *
5028  * Input:
5029  *	ha:	adapter state pointer.
5030  *	bp:	user buffer address.
5031  *	bsize:	user buffer size
5032  *	faddr:	flash byte address
5033  *	mode:	flags
5034  *
5035  * Returns:
5036  *
5037  * Context:
5038  *	Kernel context.
5039  */
5040 static int
5041 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5042     uint32_t faddr, int mode)
5043 {
5044 	uint8_t		*bfp;
5045 	int		rval;
5046 	ql_xioctl_t	*xp = ha->xioctl;
5047 
5048 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5049 
5050 	/* adjust max read size to flash size */
5051 	if (bsize > xp->fdesc.flash_size) {
5052 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5053 		    xp->fdesc.flash_size);
5054 		bsize = xp->fdesc.flash_size;
5055 	}
5056 
5057 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5058 		EL(ha, "failed, kmem_zalloc\n");
5059 		rval = ENOMEM;
5060 	} else {
5061 		/* Dump Flash fcode. */
5062 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5063 
5064 		if (rval != QL_SUCCESS) {
5065 			EL(ha, "failed, dump_fcode = %x\n", rval);
5066 			rval = EFAULT;
5067 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5068 			EL(ha, "failed, ddi_copyout\n");
5069 			rval = EFAULT;
5070 		} else {
5071 			rval = 0;
5072 		}
5073 		kmem_free(bfp, bsize);
5074 	}
5075 
5076 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5077 
5078 	return (rval);
5079 }
5080 
5081 /*
5082  * ql_dump_fcode
5083  *	Dumps fcode from flash.
5084  *
5085  * Input:
5086  *	ha:		adapter state pointer.
5087  *	dp:		data pointer.
5088  *	size:		data length in bytes.
5089  *	startpos:	starting position in flash (byte address).
5090  *
5091  * Returns:
5092  *	ql local function return status code.
5093  *
5094  * Context:
5095  *	Kernel context.
5096  *
5097  */
5098 int
5099 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5100     uint32_t startpos)
5101 {
5102 	uint32_t	cnt, data, addr;
5103 	uint8_t		bp[4];
5104 	int		rval = QL_SUCCESS;
5105 
5106 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5107 
5108 	/* make sure startpos+size doesn't exceed flash */
5109 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5110 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5111 		    size, startpos, ha->xioctl->fdesc.flash_size);
5112 		return (QL_FUNCTION_PARAMETER_ERROR);
5113 	}
5114 
5115 	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/* Handle a 24xx start address that is not 32-bit aligned. */
5117 		if ((startpos & 0x3) != 0) {
5118 			rval = ql_24xx_read_flash(ha,
5119 			    ha->flash_data_addr | startpos >> 2, &data);
5120 			if (rval != QL_SUCCESS) {
5121 				EL(ha, "failed2, rval = %xh\n", rval);
5122 				return (rval);
5123 			}
5124 			bp[0] = LSB(LSW(data));
5125 			bp[1] = MSB(LSW(data));
5126 			bp[2] = LSB(MSW(data));
5127 			bp[3] = MSB(MSW(data));
5128 			while (size && startpos & 0x3) {
5129 				*dp++ = bp[startpos & 0x3];
5130 				startpos++;
5131 				size--;
5132 			}
5133 			if (size == 0) {
5134 				QL_PRINT_9(CE_CONT, "(%d): done2\n",
5135 				    ha->instance);
5136 				return (rval);
5137 			}
5138 		}
5139 
		/* Convert the 24xx byte address to a 32-bit word address. */
5141 		addr = startpos / 4 | ha->flash_data_addr;
5142 	}
5143 
5144 	GLOBAL_HW_LOCK();
5145 
5146 	/* Enable Flash Read/Write. */
5147 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
5148 		ql_flash_enable(ha);
5149 	}
5150 
5151 	/* Read fcode data from flash. */
5152 	while (size) {
5153 		/* Allow other system activity. */
5154 		if (size % 0x1000 == 0) {
5155 			ql_delay(ha, 100000);
5156 		}
5157 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5158 			rval = ql_24xx_read_flash(ha, addr++, &data);
5159 			if (rval != QL_SUCCESS) {
5160 				break;
5161 			}
5162 			bp[0] = LSB(LSW(data));
5163 			bp[1] = MSB(LSW(data));
5164 			bp[2] = LSB(MSW(data));
5165 			bp[3] = MSB(MSW(data));
5166 			for (cnt = 0; size && cnt < 4; size--) {
5167 				*dp++ = bp[cnt++];
5168 			}
5169 		} else {
5170 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5171 			size--;
5172 		}
5173 	}
5174 
5175 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
5176 		ql_flash_disable(ha);
5177 	}
5178 
5179 	GLOBAL_HW_UNLOCK();
5180 
5181 	if (rval != QL_SUCCESS) {
5182 		EL(ha, "failed, rval = %xh\n", rval);
5183 	} else {
5184 		/*EMPTY*/
5185 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5186 	}
5187 	return (rval);
5188 }
5189 
5190 /*
5191  * ql_program_flash_address
5192  *	Program flash address.
5193  *
5194  * Input:
5195  *	ha:	adapter state pointer.
5196  *	addr:	flash byte address.
5197  *	data:	data to be written to flash.
5198  *
5199  * Returns:
5200  *	ql local function return status code.
5201  *
5202  * Context:
5203  *	Kernel context.
5204  */
5205 static int
5206 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5207     uint8_t data)
5208 {
5209 	int	rval;
5210 
5211 	/* Write Program Command Sequence */
5212 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5213 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5214 		ql_write_flash_byte(ha, addr, data);
5215 	} else {
5216 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5217 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5218 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5219 		ql_write_flash_byte(ha, addr, data);
5220 	}
5221 
5222 	/* Wait for write to complete. */
5223 	rval = ql_poll_flash(ha, addr, data);
5224 
5225 	if (rval != QL_SUCCESS) {
5226 		EL(ha, "failed, rval=%xh\n", rval);
5227 	}
5228 	return (rval);
5229 }
5230 
5231 /*
5232  * ql_set_rnid_parameters
5233  *	Set RNID parameters.
5234  *
5235  * Input:
5236  *	ha:	adapter state pointer.
5237  *	cmd:	User space CT arguments pointer.
5238  *	mode:	flags.
5239  */
5240 static void
5241 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5242 {
5243 	EXT_SET_RNID_REQ	tmp_set;
5244 	EXT_RNID_DATA		*tmp_buf;
5245 	int			rval = 0;
5246 
5247 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5248 
5249 	if (DRIVER_SUSPENDED(ha)) {
5250 		EL(ha, "failed, LOOP_NOT_READY\n");
5251 		cmd->Status = EXT_STATUS_BUSY;
5252 		cmd->ResponseLen = 0;
5253 		return;
5254 	}
5255 
5256 	cmd->ResponseLen = 0; /* NO response to caller. */
5257 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5258 		/* parameter error */
5259 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5260 		    cmd->RequestLen);
5261 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5262 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5263 		cmd->ResponseLen = 0;
5264 		return;
5265 	}
5266 
5267 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5268 	    cmd->RequestLen, mode);
5269 	if (rval != 0) {
5270 		EL(ha, "failed, ddi_copyin\n");
5271 		cmd->Status = EXT_STATUS_COPY_ERR;
5272 		cmd->ResponseLen = 0;
5273 		return;
5274 	}
5275 
5276 	/* Allocate memory for command. */
5277 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5278 	if (tmp_buf == NULL) {
5279 		EL(ha, "failed, kmem_zalloc\n");
5280 		cmd->Status = EXT_STATUS_NO_MEMORY;
5281 		cmd->ResponseLen = 0;
5282 		return;
5283 	}
5284 
5285 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5286 	    (caddr_t)tmp_buf);
5287 	if (rval != QL_SUCCESS) {
5288 		/* error */
5289 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5290 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5291 		cmd->Status = EXT_STATUS_ERR;
5292 		cmd->ResponseLen = 0;
5293 		return;
5294 	}
5295 
5296 	/* Now set the requested params. */
5297 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5298 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5299 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5300 
5301 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5302 	    (caddr_t)tmp_buf);
5303 	if (rval != QL_SUCCESS) {
5304 		/* error */
5305 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5306 		cmd->Status = EXT_STATUS_ERR;
5307 		cmd->ResponseLen = 0;
5308 	}
5309 
5310 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5311 
5312 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5313 }
5314 
5315 /*
5316  * ql_get_rnid_parameters
5317  *	Get RNID parameters.
5318  *
5319  * Input:
5320  *	ha:	adapter state pointer.
5321  *	cmd:	User space CT arguments pointer.
5322  *	mode:	flags.
5323  */
5324 static void
5325 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5326 {
5327 	EXT_RNID_DATA	*tmp_buf;
5328 	uint32_t	rval;
5329 
5330 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5331 
5332 	if (DRIVER_SUSPENDED(ha)) {
5333 		EL(ha, "failed, LOOP_NOT_READY\n");
5334 		cmd->Status = EXT_STATUS_BUSY;
5335 		cmd->ResponseLen = 0;
5336 		return;
5337 	}
5338 
5339 	/* Allocate memory for command. */
5340 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5341 	if (tmp_buf == NULL) {
5342 		EL(ha, "failed, kmem_zalloc\n");
5343 		cmd->Status = EXT_STATUS_NO_MEMORY;
5344 		cmd->ResponseLen = 0;
5345 		return;
5346 	}
5347 
5348 	/* Send command */
5349 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5350 	    (caddr_t)tmp_buf);
5351 	if (rval != QL_SUCCESS) {
5352 		/* error */
5353 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5354 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5355 		cmd->Status = EXT_STATUS_ERR;
5356 		cmd->ResponseLen = 0;
5357 		return;
5358 	}
5359 
5360 	/* Copy the response */
5361 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5362 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5363 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5364 		EL(ha, "failed, ddi_copyout\n");
5365 		cmd->Status = EXT_STATUS_COPY_ERR;
5366 		cmd->ResponseLen = 0;
5367 	} else {
5368 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5369 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5370 	}
5371 
5372 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5373 }
5374 
5375 /*
5376  * ql_reset_statistics
5377  *	Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5378  *
5379  * Input:
5380  *	ha:	adapter state pointer.
5381  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5382  *
5383  * Returns:
5384  *	None, request status indicated in cmd->Status.
5385  *
5386  * Context:
5387  *	Kernel context.
5388  */
5389 static int
5390 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5391 {
5392 	ql_xioctl_t		*xp = ha->xioctl;
5393 	int			rval = 0;
5394 
5395 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5396 
5397 	if (DRIVER_SUSPENDED(ha)) {
5398 		EL(ha, "failed, LOOP_NOT_READY\n");
5399 		cmd->Status = EXT_STATUS_BUSY;
5400 		cmd->ResponseLen = 0;
5401 		return (QL_FUNCTION_SUSPENDED);
5402 	}
5403 
5404 	rval = ql_reset_link_status(ha);
5405 	if (rval != QL_SUCCESS) {
5406 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5407 		cmd->Status = EXT_STATUS_MAILBOX;
5408 		cmd->DetailStatus = rval;
5409 		cmd->ResponseLen = 0;
5410 	}
5411 
5412 	TASK_DAEMON_LOCK(ha);
5413 	xp->IosRequested = 0;
5414 	xp->BytesRequested = 0;
5415 	xp->IOInputRequests = 0;
5416 	xp->IOOutputRequests = 0;
5417 	xp->IOControlRequests = 0;
5418 	xp->IOInputMByteCnt = 0;
5419 	xp->IOOutputMByteCnt = 0;
5420 	xp->IOOutputByteCnt = 0;
5421 	xp->IOInputByteCnt = 0;
5422 	TASK_DAEMON_UNLOCK(ha);
5423 
5424 	INTR_LOCK(ha);
5425 	xp->ControllerErrorCount = 0;
5426 	xp->DeviceErrorCount = 0;
5427 	xp->TotalLipResets = 0;
5428 	xp->TotalInterrupts = 0;
5429 	INTR_UNLOCK(ha);
5430 
5431 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5432 
5433 	return (rval);
5434 }
5435 
5436 /*
5437  * ql_get_statistics
5438  *	Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5439  *
5440  * Input:
5441  *	ha:	adapter state pointer.
5442  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5443  *	mode:	flags.
5444  *
5445  * Returns:
5446  *	None, request status indicated in cmd->Status.
5447  *
5448  * Context:
5449  *	Kernel context.
5450  */
5451 static void
5452 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5453 {
5454 	EXT_HBA_PORT_STAT	ps = {0};
5455 	ql_link_stats_t		*ls;
5456 	int			rval;
5457 	ql_xioctl_t		*xp = ha->xioctl;
5458 	int			retry = 10;
5459 
5460 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5461 
5462 	while (ha->task_daemon_flags &
5463 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5464 		ql_delay(ha, 10000000);	/* 10 second delay */
5465 
5466 		retry--;
5467 
5468 		if (retry == 0) { /* effectively 100 seconds */
5469 			EL(ha, "failed, LOOP_NOT_READY\n");
5470 			cmd->Status = EXT_STATUS_BUSY;
5471 			cmd->ResponseLen = 0;
5472 			return;
5473 		}
5474 	}
5475 
5476 	/* Allocate memory for command. */
5477 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5478 	if (ls == NULL) {
5479 		EL(ha, "failed, kmem_zalloc\n");
5480 		cmd->Status = EXT_STATUS_NO_MEMORY;
5481 		cmd->ResponseLen = 0;
5482 		return;
5483 	}
5484 
5485 	/*
	 * These appear to be intended as port statistics;
	 * the loop ID or port ID should be in cmd->Instance.
5488 	 */
5489 	rval = ql_get_status_counts(ha, (uint16_t)
5490 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5491 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5492 	if (rval != QL_SUCCESS) {
5493 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5494 		    ha->loop_id);
5495 		cmd->Status = EXT_STATUS_MAILBOX;
5496 		cmd->DetailStatus = rval;
5497 		cmd->ResponseLen = 0;
5498 	} else {
5499 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5500 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5501 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5502 		    xp->IOOutputRequests + xp->IOControlRequests);
5503 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5504 		    xp->IOOutputMByteCnt);
5505 		ps.LipResetCount = xp->TotalLipResets;
5506 		ps.InterruptCount = xp->TotalInterrupts;
5507 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5508 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5509 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5510 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5511 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5512 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5513 
5514 		rval = ddi_copyout((void *)&ps,
5515 		    (void *)(uintptr_t)cmd->ResponseAdr,
5516 		    sizeof (EXT_HBA_PORT_STAT), mode);
5517 		if (rval != 0) {
5518 			EL(ha, "failed, ddi_copyout\n");
5519 			cmd->Status = EXT_STATUS_COPY_ERR;
5520 			cmd->ResponseLen = 0;
5521 		} else {
5522 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5523 		}
5524 	}
5525 
5526 	kmem_free(ls, sizeof (ql_link_stats_t));
5527 
5528 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5529 }
5530 
5531 /*
5532  * ql_get_statistics_fc
5533  *	Performs the EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5534  *
5535  * Input:
5536  *	ha:	adapter state pointer.
5537  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5538  *	mode:	flags.
5539  *
5540  * Returns:
5541  *	None, request status indicated in cmd->Status.
5542  *
5543  * Context:
5544  *	Kernel context.
5545  */
5546 static void
5547 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5548 {
5549 	EXT_HBA_PORT_STAT	ps = {0};
5550 	ql_link_stats_t		*ls;
5551 	int			rval;
5552 	uint16_t		qlnt;
5553 	EXT_DEST_ADDR		pextdestaddr;
5554 	uint8_t			*name;
5555 	ql_tgt_t		*tq = NULL;
5556 	int			retry = 10;
5557 
5558 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5559 
5560 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5561 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5562 		EL(ha, "failed, ddi_copyin\n");
5563 		cmd->Status = EXT_STATUS_COPY_ERR;
5564 		cmd->ResponseLen = 0;
5565 		return;
5566 	}
5567 
5568 	qlnt = QLNT_PORT;
5569 	name = pextdestaddr.DestAddr.WWPN;
5570 
5571 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5572 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5573 	    name[5], name[6], name[7]);
5574 
5575 	tq = ql_find_port(ha, name, qlnt);
5576 
5577 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5578 		EL(ha, "failed, fc_port not found\n");
5579 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5580 		cmd->ResponseLen = 0;
5581 		return;
5582 	}
5583 
5584 	while (ha->task_daemon_flags &
5585 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5586 		ql_delay(ha, 10000000);	/* 10 second delay */
5587 
5588 		retry--;
5589 
5590 		if (retry == 0) { /* effectively 100 seconds */
5591 			EL(ha, "failed, LOOP_NOT_READY\n");
5592 			cmd->Status = EXT_STATUS_BUSY;
5593 			cmd->ResponseLen = 0;
5594 			return;
5595 		}
5596 	}
5597 
5598 	/* Allocate memory for command. */
5599 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5600 	if (ls == NULL) {
5601 		EL(ha, "failed, kmem_zalloc\n");
5602 		cmd->Status = EXT_STATUS_NO_MEMORY;
5603 		cmd->ResponseLen = 0;
5604 		return;
5605 	}
5606 
5607 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5608 	    (caddr_t)ls, 0);
5609 	if (rval != QL_SUCCESS) {
5610 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5611 		    tq->d_id.b24);
5612 		cmd->Status = EXT_STATUS_MAILBOX;
5613 		cmd->DetailStatus = rval;
5614 		cmd->ResponseLen = 0;
5615 	} else {
5616 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5617 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5618 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5619 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5620 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5621 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5622 
5623 		rval = ddi_copyout((void *)&ps,
5624 		    (void *)(uintptr_t)cmd->ResponseAdr,
5625 		    sizeof (EXT_HBA_PORT_STAT), mode);
5626 
5627 		if (rval != 0) {
5628 			EL(ha, "failed, ddi_copyout\n");
5629 			cmd->Status = EXT_STATUS_COPY_ERR;
5630 			cmd->ResponseLen = 0;
5631 		} else {
5632 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5633 		}
5634 	}
5635 
5636 	kmem_free(ls, sizeof (ql_link_stats_t));
5637 
5638 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5639 }
5640 
5641 /*
5642  * ql_get_statistics_fc4
5643  *	Performs the EXT_SC_GET_FC4_STATISTICS subcommand of EXT_CC_GET_DATA.
5644  *
5645  * Input:
5646  *	ha:	adapter state pointer.
5647  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5648  *	mode:	flags.
5649  *
5650  * Returns:
5651  *	None, request status indicated in cmd->Status.
5652  *
5653  * Context:
5654  *	Kernel context.
5655  */
5656 static void
5657 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5658 {
5659 	uint32_t		rval;
5660 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5661 	ql_xioctl_t		*xp = ha->xioctl;
5662 
5663 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5664 
5665 	fc4stats.InputRequests = xp->IOInputRequests;
5666 	fc4stats.OutputRequests = xp->IOOutputRequests;
5667 	fc4stats.ControlRequests = xp->IOControlRequests;
5668 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5669 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5670 
5671 	rval = ddi_copyout((void *)&fc4stats,
5672 	    (void *)(uintptr_t)cmd->ResponseAdr,
5673 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5674 
5675 	if (rval != 0) {
5676 		EL(ha, "failed, ddi_copyout\n");
5677 		cmd->Status = EXT_STATUS_COPY_ERR;
5678 		cmd->ResponseLen = 0;
5679 	} else {
5680 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5681 	}
5682 
5683 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5684 }
5685 
5686 /*
5687  * ql_set_led_state
5688  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5689  *
5690  * Input:
5691  *	ha:	adapter state pointer.
5692  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5693  *	mode:	flags.
5694  *
5695  * Returns:
5696  *	None, request status indicated in cmd->Status.
5697  *
5698  * Context:
5699  *	Kernel context.
5700  */
5701 static void
5702 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5703 {
5704 	EXT_BEACON_CONTROL	bstate;
5705 	uint32_t		rval;
5706 	ql_xioctl_t		*xp = ha->xioctl;
5707 
5708 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5709 
5710 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5711 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5712 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5713 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5714 		    " Len=%xh\n", cmd->RequestLen);
5715 		cmd->ResponseLen = 0;
5716 		return;
5717 	}
5718 
5719 	if (ha->device_id < 0x2300) {
5720 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5721 		cmd->DetailStatus = 0;
5722 		EL(ha, "done - failed, Invalid function for HBA model\n");
5723 		cmd->ResponseLen = 0;
5724 		return;
5725 	}
5726 
5727 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5728 	    cmd->RequestLen, mode);
5729 
5730 	if (rval != 0) {
5731 		cmd->Status = EXT_STATUS_COPY_ERR;
5732 		EL(ha, "done - failed, ddi_copyin\n");
5733 		return;
5734 	}
5735 
5736 	switch (bstate.State) {
5737 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5738 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5739 			/* not quite an error -- LED state is already off */
5740 			cmd->Status = EXT_STATUS_OK;
5741 			EL(ha, "LED off request -- LED is already off\n");
5742 			break;
5743 		}
5744 
5745 		xp->ledstate.BeaconState = BEACON_OFF;
5746 		xp->ledstate.LEDflags = LED_ALL_OFF;
5747 
5748 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5749 			cmd->Status = EXT_STATUS_MAILBOX;
5750 		} else {
5751 			cmd->Status = EXT_STATUS_OK;
5752 		}
5753 		break;
5754 
5755 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5756 		if (xp->ledstate.BeaconState == BEACON_ON) {
5757 			/* not quite an error -- LED state is already on */
5758 			cmd->Status = EXT_STATUS_OK;
5759 			EL(ha, "LED on request -- LED is already on\n");
5760 			break;
5761 		}
5762 
5763 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5764 			cmd->Status = EXT_STATUS_MAILBOX;
5765 			break;
5766 		}
5767 
5768 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5769 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5770 		} else {
5771 			xp->ledstate.LEDflags = LED_GREEN;
5772 		}
5773 		xp->ledstate.BeaconState = BEACON_ON;
5774 
5775 		cmd->Status = EXT_STATUS_OK;
5776 		break;
5777 	default:
5778 		cmd->Status = EXT_STATUS_ERR;
5779 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5780 		break;
5781 	}
5782 
5783 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5784 }
5785 
5786 /*
5787  * ql_get_led_state
5788  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5789  *
5790  * Input:
5791  *	ha:	adapter state pointer.
5792  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5793  *	mode:	flags.
5794  *
5795  * Returns:
5796  *	None, request status indicated in cmd->Status.
5797  *
5798  * Context:
5799  *	Kernel context.
5800  */
5801 static void
5802 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5803 {
5804 	EXT_BEACON_CONTROL	bstate = {0};
5805 	uint32_t		rval;
5806 	ql_xioctl_t		*xp = ha->xioctl;
5807 
5808 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5809 
5810 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5811 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5812 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5813 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5814 		    "Len=%xh\n", cmd->ResponseLen);
5815 		cmd->ResponseLen = 0;
5816 		return;
5817 	}
5818 
5819 	if (ha->device_id < 0x2300) {
5820 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5821 		cmd->DetailStatus = 0;
5822 		EL(ha, "done - failed, Invalid function for HBA model\n");
5823 		cmd->ResponseLen = 0;
5824 		return;
5825 	}
5826 
5827 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5828 		cmd->Status = EXT_STATUS_BUSY;
5829 		EL(ha, "done - failed, isp abort active\n");
5830 		cmd->ResponseLen = 0;
5831 		return;
5832 	}
5833 
5834 	/* inform the user of the current beacon state (off or on) */
5835 	bstate.State = xp->ledstate.BeaconState;
5836 
5837 	rval = ddi_copyout((void *)&bstate,
5838 	    (void *)(uintptr_t)cmd->ResponseAdr,
5839 	    sizeof (EXT_BEACON_CONTROL), mode);
5840 
5841 	if (rval != 0) {
5842 		EL(ha, "failed, ddi_copyout\n");
5843 		cmd->Status = EXT_STATUS_COPY_ERR;
5844 		cmd->ResponseLen = 0;
5845 	} else {
5846 		cmd->Status = EXT_STATUS_OK;
5847 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
5848 	}
5849 
5850 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5851 }
5852 
5853 /*
5854  * ql_blink_led
5855  *	Determine the next state of the LED and drive it
5856  *
5857  * Input:
5858  *	ha:	adapter state pointer.
5859  *
5860  * Context:
5861  *	Interrupt context.
5862  */
5863 void
5864 ql_blink_led(ql_adapter_state_t *ha)
5865 {
5866 	uint32_t		nextstate;
5867 	ql_xioctl_t		*xp = ha->xioctl;
5868 
5869 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5870 
5871 	if (xp->ledstate.BeaconState == BEACON_ON) {
5872 		/* determine the next led state */
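		/*
		 * Note: masking the desired LED bits against the inverse of
		 * the current GPIO data means a bit that is already set
		 * comes back clear and vice versa, so successive timer-driven
		 * calls toggle, i.e. blink, the LEDs selected by LEDflags.
		 */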
5873 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5874 			nextstate = (xp->ledstate.LEDflags) &
5875 			    (~(RD32_IO_REG(ha, gpiod)));
5876 		} else {
5877 			nextstate = (xp->ledstate.LEDflags) &
5878 			    (~(RD16_IO_REG(ha, gpiod)));
5879 		}
5880 
5881 		/* turn the led on or off */
5882 		ql_drive_led(ha, nextstate);
5883 	}
5884 
5885 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5886 }
5887 
5888 /*
5889  * ql_drive_led
5890  *	Drive the LEDs as determined by LEDflags
5891  *
5892  * Input:
5893  *	ha:		adapter state pointer.
5894  *	LEDflags:	LED flags
5895  *
5896  * Context:
5897  *	Kernel/Interrupt context.
5898  */
5899 static void
5900 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
5901 {
5902 
5903 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5904 
5905 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
5906 
5907 		uint16_t	gpio_enable, gpio_data;
5908 
5909 		/* setup to send new data */
5910 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
5911 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
5912 		WRT16_IO_REG(ha, gpioe, gpio_enable);
5913 
5914 		/* read current data and clear out old led data */
5915 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
5916 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
5917 
5918 		/* set in the new led data. */
5919 		gpio_data = (uint16_t)(gpio_data | LEDflags);
5920 
5921 		/* write out the new led data */
5922 		WRT16_IO_REG(ha, gpiod, gpio_data);
5923 
5924 	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
5925 
5926 		uint32_t	gpio_data;
5927 
5928 		/* setup to send new data */
5929 		gpio_data = RD32_IO_REG(ha, gpiod);
5930 		gpio_data |= LED_MASK_UPDATE_24;
5931 		WRT32_IO_REG(ha, gpiod, gpio_data);
5932 
5933 		/* read current data and clear out old led data */
5934 		gpio_data = RD32_IO_REG(ha, gpiod);
5935 		gpio_data &= ~LED_MASK_COLORS_24;
5936 
5937 		/* set in the new led data */
5938 		gpio_data |= LEDflags;
5939 
5940 		/* write out the new led data */
5941 		WRT32_IO_REG(ha, gpiod, gpio_data);
5942 
5943 	} else {
5944 		EL(ha, "unsupported HBA: %xh", ha->device_id);
5945 	}
5946 
5947 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5948 }
5949 
5950 /*
5951  * ql_setup_led
5952  *	Setup LED for driver control
5953  *
5954  * Input:
5955  *	ha:	adapter state pointer.
5956  *
5957  * Context:
5958  *	Kernel/Interrupt context.
5959  */
5960 static uint32_t
5961 ql_setup_led(ql_adapter_state_t *ha)
5962 {
5963 	uint32_t	rval;
5964 	ql_mbx_data_t	mr;
5965 
5966 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5967 
5968 	/* decouple the LED control from the fw */
5969 	rval = ql_get_firmware_option(ha, &mr);
5970 	if (rval != QL_SUCCESS) {
5971 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
5972 		return (rval);
5973 	}
5974 
5975 	/* set the appropriate options */
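	/*
	 * FO1_DISABLE_GPIO is assumed to keep the firmware from driving the
	 * GPIO pins itself, giving the driver exclusive LED control until
	 * ql_wrapup_led() clears the bit again.
	 */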
5976 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
5977 
5978 	/* send it back to the firmware */
5979 	rval = ql_set_firmware_option(ha, &mr);
5980 	if (rval != QL_SUCCESS) {
5981 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
5982 		return (rval);
5983 	}
5984 
5985 	/* initially, turn the LEDs off */
5986 	ql_drive_led(ha, LED_ALL_OFF);
5987 
5988 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5989 
5990 	return (rval);
5991 }
5992 
5993 /*
5994  * ql_wrapup_led
5995  *	Return LED control to the firmware
5996  *
5997  * Input:
5998  *	ha:	adapter state pointer.
5999  *
6000  * Context:
6001  *	Kernel/Interrupt context.
6002  */
6003 static uint32_t
6004 ql_wrapup_led(ql_adapter_state_t *ha)
6005 {
6006 	uint32_t	rval;
6007 	ql_mbx_data_t	mr;
6008 
6009 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6010 
6011 	/* Turn all LEDs off */
6012 	ql_drive_led(ha, LED_ALL_OFF);
6013 
6014 	if (CFG_IST(ha, CFG_CTRL_242581)) {
6015 
6016 		uint32_t	gpio_data;
6017 
6018 		/* disable the LED update mask */
6019 		gpio_data = RD32_IO_REG(ha, gpiod);
6020 		gpio_data &= ~LED_MASK_UPDATE_24;
6021 
6022 		/* write out the data */
6023 		WRT32_IO_REG(ha, gpiod, gpio_data);
6024 	}
6025 
6026 	/* give LED control back to the f/w */
6027 	rval = ql_get_firmware_option(ha, &mr);
6028 	if (rval != QL_SUCCESS) {
6029 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6030 		return (rval);
6031 	}
6032 
6033 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6034 
6035 	rval = ql_set_firmware_option(ha, &mr);
6036 	if (rval != QL_SUCCESS) {
6037 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6038 		return (rval);
6039 	}
6040 
6041 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6042 
6043 	return (rval);
6044 }
6045 
6046 /*
6047  * ql_get_port_summary
6048  *	Performs the EXT_SC_GET_PORT_SUMMARY subcommand of EXT_CC_GET_DATA.
6049  *
6050  *	The EXT_IOCTL->RequestAdr points to a single
6051  *	UINT32 which identifies the device type.
6052  *
6053  * Input:
6054  *	ha:	adapter state pointer.
6055  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6056  *	mode:	flags.
6057  *
6058  * Returns:
6059  *	None, request status indicated in cmd->Status.
6060  *
6061  * Context:
6062  *	Kernel context.
6063  */
6064 static void
6065 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6066 {
6067 	EXT_DEVICEDATA		dd = {0};
6068 	EXT_DEVICEDATA		*uddp;
6069 	ql_link_t		*link;
6070 	ql_tgt_t		*tq;
6071 	uint32_t		rlen, dev_type, index;
6072 	int			rval = 0;
6073 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6074 
6075 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6076 
6077 	ddep = &dd.EntryList[0];
6078 
6079 	/*
6080 	 * Get the type of device the requestor is looking for.
6081 	 *
6082 	 * We ignore this for now.
6083 	 */
6084 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6085 	    (void *)&dev_type, sizeof (dev_type), mode);
6086 	if (rval != 0) {
6087 		cmd->Status = EXT_STATUS_COPY_ERR;
6088 		cmd->ResponseLen = 0;
6089 		EL(ha, "failed, ddi_copyin\n");
6090 		return;
6091 	}
6092 	/*
6093 	 * Count the number of entries to be returned. Count devices
6094 	 * that are offline, but have been persistently bound.
6095 	 */
6096 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6097 		for (link = ha->dev[index].first; link != NULL;
6098 		    link = link->next) {
6099 			tq = link->base_address;
6100 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6101 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6102 				continue;	/* Skip this one */
6103 			}
6104 			dd.TotalDevices++;
6105 		}
6106 	}
6107 	/*
6108 	 * Compute the number of entries that can be returned
6109 	 * based upon the size of caller's response buffer.
6110 	 */
6111 	dd.ReturnListEntryCount = 0;
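	/*
	 * EXT_DEVICEDATA already embeds one EXT_DEVICEDATAENTRY, so the
	 * response for N devices needs (N - 1) additional entries; with
	 * zero devices only the fixed portion is counted.
	 */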
6112 	if (dd.TotalDevices == 0) {
6113 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6114 	} else {
6115 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6116 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6117 	}
6118 	if (rlen > cmd->ResponseLen) {
6119 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6120 		cmd->DetailStatus = rlen;
6121 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6122 		    rlen, cmd->ResponseLen);
6123 		cmd->ResponseLen = 0;
6124 		return;
6125 	}
6126 	cmd->ResponseLen = 0;
6127 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6128 	uddep = &uddp->EntryList[0];
6129 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6130 		for (link = ha->dev[index].first; link != NULL;
6131 		    link = link->next) {
6132 			tq = link->base_address;
6133 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6134 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6135 				continue;	/* Skip this one */
6136 			}
6137 
6138 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6139 
6140 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6141 			bcopy(tq->port_name, ddep->PortWWN, 8);
6142 
6143 			ddep->PortID[0] = tq->d_id.b.domain;
6144 			ddep->PortID[1] = tq->d_id.b.area;
6145 			ddep->PortID[2] = tq->d_id.b.al_pa;
6146 
6147 			bcopy(tq->port_name,
6148 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6149 
6150 			ddep->DeviceFlags = tq->flags;
6151 			ddep->LoopID = tq->loop_id;
6152 			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6153 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6154 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6155 			    ha->instance, ddep->TargetAddress.Target,
6156 			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6157 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6158 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6159 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6160 			    ddep->PortWWN[0], ddep->PortWWN[1],
6161 			    ddep->PortWWN[2], ddep->PortWWN[3],
6162 			    ddep->PortWWN[4], ddep->PortWWN[5],
6163 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6164 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6165 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6166 
6167 			if (rval != 0) {
6168 				cmd->Status = EXT_STATUS_COPY_ERR;
6169 				cmd->ResponseLen = 0;
6170 				EL(ha, "failed, ddi_copyout\n");
6171 				break;
6172 			}
6173 			dd.ReturnListEntryCount++;
6174 			uddep++;
6175 			cmd->ResponseLen += (uint32_t)
6176 			    sizeof (EXT_DEVICEDATAENTRY);
6177 		}
6178 	}
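	/*
	 * Copy out only the fixed portion of the list header (the counts);
	 * the individual entries were already copied to the user buffer
	 * above.
	 */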
6179 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6180 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6181 
6182 	if (rval != 0) {
6183 		cmd->Status = EXT_STATUS_COPY_ERR;
6184 		cmd->ResponseLen = 0;
6185 		EL(ha, "failed, ddi_copyout-2\n");
6186 	} else {
6187 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6188 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6189 	}
6190 }
6191 
6192 /*
6193  * ql_get_target_id
6194  *	Performs the EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6195  *
6196  * Input:
6197  *	ha:	adapter state pointer.
6198  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6199  *	mode:	flags.
6200  *
6201  * Returns:
6202  *	None, request status indicated in cmd->Status.
6203  *
6204  * Context:
6205  *	Kernel context.
6206  */
6207 static void
6208 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6209 {
6210 	uint32_t		rval;
6211 	uint16_t		qlnt;
6212 	EXT_DEST_ADDR		extdestaddr = {0};
6213 	uint8_t			*name;
6214 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6215 	ql_tgt_t		*tq;
6216 
6217 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6218 
6219 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6220 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6221 		EL(ha, "failed, ddi_copyin\n");
6222 		cmd->Status = EXT_STATUS_COPY_ERR;
6223 		cmd->ResponseLen = 0;
6224 		return;
6225 	}
6226 
6227 	qlnt = QLNT_PORT;
6228 	name = wwpn;
6229 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6230 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6231 	    name[5], name[6], name[7]);
6232 
6233 	tq = ql_find_port(ha, name, qlnt);
6234 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6235 		EL(ha, "failed, fc_port not found\n");
6236 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6237 		cmd->ResponseLen = 0;
6238 		return;
6239 	}
6240 
6241 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6242 
6243 	rval = ddi_copyout((void *)&extdestaddr,
6244 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6245 	if (rval != 0) {
6246 		EL(ha, "failed, ddi_copyout\n");
6247 		cmd->Status = EXT_STATUS_COPY_ERR;
6248 		cmd->ResponseLen = 0;
6249 	}
6250 
6251 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6252 }
6253 
6254 /*
6255  * ql_setup_fcache
6256  *	Populates selected flash sections into the cache
6257  *
6258  * Input:
6259  *	ha = adapter state pointer.
6260  *
6261  * Returns:
6262  *	ql local function return status code.
6263  *
6264  * Context:
6265  *	Kernel context.
6266  *
6267  * Note:
6268  *	The driver must be in a stalled state prior to entering, or
6269  *	code must be added to this function prior to calling ql_setup_flash().
6270  */
6271 int
6272 ql_setup_fcache(ql_adapter_state_t *ha)
6273 {
6274 	int		rval;
6275 	uint32_t	freadpos = 0;
6276 	uint32_t	fw_done = 0;
6277 	ql_fcache_t	*head = NULL;
6278 	ql_fcache_t	*tail = NULL;
6279 	ql_fcache_t	*ftmp;
6280 
6281 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6282 
6283 	CACHE_LOCK(ha);
6284 
6285 	/* If we already have populated it, rtn */
6286 	if (ha->fcache != NULL) {
6287 		CACHE_UNLOCK(ha);
6288 		EL(ha, "buffer already populated\n");
6289 		return (QL_SUCCESS);
6290 	}
6291 
6292 	ql_flash_nvram_defaults(ha);
6293 
6294 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6295 		CACHE_UNLOCK(ha);
6296 		EL(ha, "unable to setup flash; rval=%xh\n", rval);
6297 		return (rval);
6298 	}
6299 
6300 	while (freadpos != 0xffffffff) {
6301 
6302 		/* Allocate & populate this node */
6303 
6304 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6305 			EL(ha, "node alloc failed\n");
6306 			rval = QL_FUNCTION_FAILED;
6307 			break;
6308 		}
6309 
6310 		/* link in the new node */
6311 		if (head == NULL) {
6312 			head = tail = ftmp;
6313 		} else {
6314 			tail->next = ftmp;
6315 			tail = ftmp;
6316 		}
6317 
6318 		/* Do the firmware node first for 24xx/25xx's */
6319 		if (fw_done == 0) {
6320 			if (CFG_IST(ha, CFG_CTRL_242581)) {
6321 				freadpos = ha->flash_fw_addr << 2;
6322 			}
6323 			fw_done = 1;
6324 		}
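		/*
		 * The raw firmware region has no PCI expansion ROM header of
		 * its own; ql_check_pci() recognizes it by address and
		 * synthesizes one for the cached node.
		 */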
6325 
6326 		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6327 		    freadpos)) != QL_SUCCESS) {
6328 			EL(ha, "failed, 24xx dump_fcode"
6329 			    " pos=%xh rval=%xh\n", freadpos, rval);
6330 			rval = QL_FUNCTION_FAILED;
6331 			break;
6332 		}
6333 
6334 		/* checkout the pci data / format */
6335 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6336 			EL(ha, "flash header incorrect\n");
6337 			rval = QL_FUNCTION_FAILED;
6338 			break;
6339 		}
6340 	}
6341 
6342 	if (rval != QL_SUCCESS) {
6343 		/* release all resources we have */
6344 		ftmp = head;
6345 		while (ftmp != NULL) {
6346 			tail = ftmp->next;
6347 			kmem_free(ftmp->buf, FBUFSIZE);
6348 			kmem_free(ftmp, sizeof (ql_fcache_t));
6349 			ftmp = tail;
6350 		}
6351 
6352 		EL(ha, "failed, done\n");
6353 	} else {
6354 		ha->fcache = head;
6355 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6356 	}
6357 	CACHE_UNLOCK(ha);
6358 
6359 	return (rval);
6360 }
6361 
6362 /*
6363  * ql_update_fcache
6364  *	Re-populates updated flash into the fcache. If the
6365  *	fcache does not exist (e.g., the flash was empty/invalid on
6366  *	boot), this routine will create and populate it.
6367  *
6368  * Input:
6369  *	ha	= adapter state pointer.
6370  *	bfp	= Pointer to flash buffer.
6371  *	bsize	= Size of flash buffer.
6372  *
6373  * Returns:
6374  *
6375  * Context:
6376  *	Kernel context.
6377  */
6378 void
6379 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6380 {
6381 	int		rval = QL_SUCCESS;
6382 	uint32_t	freadpos = 0;
6383 	uint32_t	fw_done = 0;
6384 	ql_fcache_t	*head = NULL;
6385 	ql_fcache_t	*tail = NULL;
6386 	ql_fcache_t	*ftmp;
6387 
6388 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6389 
6390 	while (freadpos != 0xffffffff) {
6391 
6392 		/* Allocate & populate this node */
6393 
6394 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6395 			EL(ha, "node alloc failed\n");
6396 			rval = QL_FUNCTION_FAILED;
6397 			break;
6398 		}
6399 
6400 		/* link in the new node */
6401 		if (head == NULL) {
6402 			head = tail = ftmp;
6403 		} else {
6404 			tail->next = ftmp;
6405 			tail = ftmp;
6406 		}
6407 
6408 		/* Do the firmware node first for 24xx's */
6409 		if (fw_done == 0) {
6410 			if (CFG_IST(ha, CFG_CTRL_242581)) {
6411 				freadpos = ha->flash_fw_addr << 2;
6412 			}
6413 			fw_done = 1;
6414 		}
6415 
6416 		/* read in first FBUFSIZE bytes of this flash section */
6417 		if (freadpos + FBUFSIZE > bsize) {
6418 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6419 			    freadpos, bsize);
6420 			rval = QL_FUNCTION_FAILED;
6421 			break;
6422 		}
6423 		bcopy(bfp + freadpos, ftmp->buf, FBUFSIZE);
6424 
6425 		/* checkout the pci data / format */
6426 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6427 			EL(ha, "flash header incorrect\n");
6428 			rval = QL_FUNCTION_FAILED;
6429 			break;
6430 		}
6431 	}
6432 
6433 	if (rval != QL_SUCCESS) {
6434 		/*
6435 		 * release all resources we have
6436 		 */
6437 		ql_fcache_rel(head);
6438 		EL(ha, "failed, done\n");
6439 	} else {
6440 		/*
6441 		 * Release previous fcache resources and update with new
6442 		 */
6443 		CACHE_LOCK(ha);
6444 		ql_fcache_rel(ha->fcache);
6445 		ha->fcache = head;
6446 		CACHE_UNLOCK(ha);
6447 
6448 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6449 	}
6450 }
6451 
6452 /*
6453  * ql_setup_fnode
6454  *	Allocates fcache node
6455  *
6456  * Input:
6457  *	ha = adapter state pointer.
6458  *
6459  * Returns:
6460  *	Pointer to the allocated fcache node (NULL = failed).
6461  *
6462  * Context:
6463  *	Kernel context.
6464  *
6465  * Note:
6466  *	The driver must be in a stalled state prior to entering, or
6467  *	code must be added to this function prior to calling ql_setup_flash().
6468  */
6469 static ql_fcache_t *
6470 ql_setup_fnode(ql_adapter_state_t *ha)
6471 {
6472 	ql_fcache_t	*fnode = NULL;
6473 
6474 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6475 	    KM_SLEEP))) == NULL) {
6476 		EL(ha, "fnode alloc failed\n");
6477 		fnode = NULL;
6478 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6479 	    KM_SLEEP))) == NULL) {
6480 		EL(ha, "buf alloc failed\n");
6481 		kmem_free(fnode, sizeof (ql_fcache_t));
6482 		fnode = NULL;
6483 	} else {
6484 		fnode->buflen = FBUFSIZE;
6485 	}
6486 
6487 	return (fnode);
6488 }
6489 
6490 /*
6491  * ql_fcache_rel
6492  *	Releases the fcache resources
6493  *
6494  * Input:
6495  *	head	= Pointer to the fcache linked list.
6497  *
6498  * Returns:
6499  *
6500  * Context:
6501  *	Kernel context.
6502  *
6503  */
6504 void
6505 ql_fcache_rel(ql_fcache_t *head)
6506 {
6507 	ql_fcache_t	*ftmp = head;
6508 	ql_fcache_t	*tail;
6509 
6510 	/* release all resources we have */
6511 	while (ftmp != NULL) {
6512 		tail = ftmp->next;
6513 		kmem_free(ftmp->buf, FBUFSIZE);
6514 		kmem_free(ftmp, sizeof (ql_fcache_t));
6515 		ftmp = tail;
6516 	}
6517 }
6518 
6519 /*
6520  * ql_update_flash_caches
6521  *	Updates driver flash caches
6522  *
6523  * Input:
6524  *	ha:	adapter state pointer.
6525  *
6526  * Context:
6527  *	Kernel context.
6528  */
6529 static void
6530 ql_update_flash_caches(ql_adapter_state_t *ha)
6531 {
6532 	uint32_t		len;
6533 	ql_link_t		*link;
6534 	ql_adapter_state_t	*ha2;
6535 
6536 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6537 
6538 	/* Get base path length. */
6539 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6540 		if (ha->devpath[len] == ',' ||
6541 		    ha->devpath[len] == '@') {
6542 			break;
6543 		}
6544 	}
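	/*
	 * Instances whose device paths match up to this point are assumed to
	 * be functions of the same physical adapter, and therefore share the
	 * same flash part, so each of their caches is rebuilt.
	 */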
6545 
6546 	/* Reset fcache on all adapter instances. */
6547 	for (link = ql_hba.first; link != NULL; link = link->next) {
6548 		ha2 = link->base_address;
6549 
6550 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6551 			continue;
6552 		}
6553 
6554 		CACHE_LOCK(ha2);
6555 		ql_fcache_rel(ha2->fcache);
6556 		ha2->fcache = NULL;
6557 
6558 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6559 			if (ha2->vcache != NULL) {
6560 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6561 				ha2->vcache = NULL;
6562 			}
6563 		}
6564 		CACHE_UNLOCK(ha2);
6565 
6566 		(void) ql_setup_fcache(ha2);
6567 	}
6568 
6569 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6570 }
6571 
6572 /*
6573  * ql_get_fbuf
6574  *	Search the fcache list for the type specified
6575  *
6576  * Input:
6577  *	fptr	= Pointer to fcache linked list
6578  *	ftype	= Type of image to be returned.
6579  *
6580  * Returns:
6581  *	Pointer to ql_fcache_t.
6582  *	NULL means not found.
6583  *
6584  * Context:
6585  *	Kernel context.
6586  *
6588  */
6589 ql_fcache_t *
6590 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6591 {
6592 	while (fptr != NULL) {
6593 		/* does this image meet criteria? */
6594 		if (ftype & fptr->type) {
6595 			break;
6596 		}
6597 		fptr = fptr->next;
6598 	}
6599 	return (fptr);
6600 }
6601 
6602 /*
6603  * ql_check_pci
6604  *
6605  *	checks the passed buffer for a valid pci signature and
6606  *	expected (and in range) pci length values.
6607  *
6608  *	For firmware type, a pci header is added since the image in
6609  *	the flash does not have one (!!!).
6610  *
6611  *	On successful pci check, nextpos adjusted to next pci header.
6612  *
6613  * Returns:
6614  *	-1 --> last pci image
6615  *	0 --> pci header valid
6616  *	1 --> pci header invalid.
6617  *
6618  * Context:
6619  *	Kernel context.
6620  */
6621 static int
6622 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
6623 {
6624 	pci_header_t	*pcih;
6625 	pci_data_t	*pcid;
6626 	uint32_t	doff;
6627 	uint8_t		*pciinfo;
6628 
6629 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6630 
6631 	if (fcache != NULL) {
6632 		pciinfo = fcache->buf;
6633 	} else {
6634 		EL(ha, "failed, null fcache ptr passed\n");
6635 		return (1);
6636 	}
6637 
6638 	if (pciinfo == NULL) {
6639 		EL(ha, "failed, null pciinfo ptr passed\n");
6640 		return (1);
6641 	}
6642 
6643 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
6644 		caddr_t	bufp;
6645 		uint_t	len;
6646 
6647 		if (pciinfo[0] != SBUS_CODE_FCODE) {
6648 			EL(ha, "failed, unable to detect sbus fcode\n");
6649 			return (1);
6650 		}
6651 		fcache->type = FTYPE_FCODE;
6652 
6653 		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
6654 		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
6655 		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
6656 		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
6657 		    (int *)&len) == DDI_PROP_SUCCESS) {
6658 
6659 			(void) snprintf(fcache->verstr,
6660 			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
6661 			kmem_free(bufp, len);
6662 		}
6663 
6664 		*nextpos = 0xffffffff;
6665 
6666 		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
6667 		    ha->instance);
6668 
6669 		return (0);
6670 	}
6671 
6672 	if (*nextpos == ha->flash_fw_addr << 2) {
6673 
6674 		pci_header_t	fwh = {0};
6675 		pci_data_t	fwd = {0};
6676 		uint8_t		*buf, *bufp;
6677 
6678 		/*
6679 		 * Build a pci header for the firmware module
6680 		 */
6681 		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
6682 		    NULL) {
6683 			EL(ha, "failed, unable to allocate buffer\n");
6684 			return (1);
6685 		}
6686 
6687 		fwh.signature[0] = PCI_HEADER0;
6688 		fwh.signature[1] = PCI_HEADER1;
6689 		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
6690 		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));
6691 
6692 		fwd.signature[0] = 'P';
6693 		fwd.signature[1] = 'C';
6694 		fwd.signature[2] = 'I';
6695 		fwd.signature[3] = 'R';
6696 		fwd.codetype = PCI_CODE_FW;
6697 		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
6698 		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
6699 
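		/*
		 * Stage the synthetic header and PCIR data structure in a
		 * scratch buffer, append the start of the firmware image,
		 * then copy the result back so the cached node looks like a
		 * normal PCI expansion ROM image.
		 */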
6700 		bufp = buf;
6701 		bcopy(&fwh, bufp, sizeof (pci_header_t));
6702 		bufp += sizeof (pci_header_t);
6703 		bcopy(&fwd, bufp, sizeof (pci_data_t));
6704 		bufp += sizeof (pci_data_t);
6705 
6706 		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
6707 		    sizeof (pci_data_t)));
6708 		bcopy(buf, fcache->buf, FBUFSIZE);
6709 
6710 		fcache->type = FTYPE_FW;
6711 
6712 		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6713 		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
6714 		    fcache->buf[27]);
6715 
6716 		*nextpos = CFG_IST(ha, CFG_CTRL_81XX) ? 0x200000 : 0;
6717 		kmem_free(buf, FBUFSIZE);
6718 
6719 		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);
6720 
6721 		return (0);
6722 	}
6723 
6724 	/* get to the pci header image length */
6725 	pcih = (pci_header_t *)pciinfo;
6726 
6727 	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
6728 
6729 	/* some header section sanity check */
6730 	if (pcih->signature[0] != PCI_HEADER0 ||
6731 	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
6732 		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
6733 		    pcih->signature[0], pcih->signature[1], doff);
6734 		return (1);
6735 	}
6736 
6737 	pcid = (pci_data_t *)(pciinfo + doff);
6738 
6739 	/* a slight sanity data section check */
6740 	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
6741 	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
6742 		EL(ha, "failed, data sig mismatch!\n");
6743 		return (1);
6744 	}
6745 
6746 	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
6747 		EL(ha, "last image\n");
6748 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6749 			ql_flash_layout_table(ha, *nextpos +
6750 			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
6751 			    8)) * PCI_SECTOR_SIZE);
6752 			(void) ql_24xx_flash_desc(ha);
6753 		}
6754 		*nextpos = 0xffffffff;
6755 	} else {
6756 		/* adjust the next flash read start position */
6757 		*nextpos += (pcid->imagelength[0] |
6758 		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
6759 	}
6760 
6761 	switch (pcid->codetype) {
6762 	case PCI_CODE_X86PC:
6763 		fcache->type = FTYPE_BIOS;
6764 		break;
6765 	case PCI_CODE_FCODE:
6766 		fcache->type = FTYPE_FCODE;
6767 		break;
6768 	case PCI_CODE_EFI:
6769 		fcache->type = FTYPE_EFI;
6770 		break;
6771 	case PCI_CODE_HPPA:
6772 		fcache->type = FTYPE_HPPA;
6773 		break;
6774 	default:
6775 		fcache->type = FTYPE_UNKNOWN;
6776 		break;
6777 	}
6778 
6779 	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6780 	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
6781 
6782 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6783 
6784 	return (0);
6785 }
6786 
6787 /*
6788  * ql_flash_layout_table
6789  *	Obtains flash addresses from table
6790  *
6791  * Input:
6792  *	ha:		adapter state pointer.
6793  *	flt_paddr:	flash layout pointer address.
6794  *
6795  * Context:
6796  *	Kernel context.
6797  */
6798 static void
6799 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
6800 {
6801 	ql_flt_ptr_t	*fptr;
6802 	ql_flt_hdr_t	*fhdr;
6803 	ql_flt_region_t	*frgn;
6804 	uint8_t		*bp, *eaddr;
6805 	int		rval;
6806 	uint32_t	len, faddr, cnt;
6807 	uint16_t	chksum, w16;
6808 
6809 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6810 
6811 	/* Process flash layout table header */
6812 	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
6813 		EL(ha, "kmem_zalloc=null\n");
6814 		return;
6815 	}
6816 
6817 	/* Process pointer to flash layout table */
6818 	if ((rval = ql_dump_fcode(ha, bp, sizeof (ql_flt_ptr_t), flt_paddr)) !=
6819 	    QL_SUCCESS) {
6820 		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
6821 		    rval);
6822 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6823 		return;
6824 	}
6825 	fptr = (ql_flt_ptr_t *)bp;
6826 
6827 	/* Verify pointer to flash layout table. */
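	/*
	 * The 16-bit word sum over the structure, including its checksum
	 * field, must be zero and the signature must read "QFLT".
	 */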
6828 	for (chksum = 0, cnt = 0; cnt < sizeof (ql_flt_ptr_t); cnt += 2) {
6829 		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6830 		chksum += w16;
6831 	}
6832 	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
6833 	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
6834 		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
6835 		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
6836 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6837 		return;
6838 	}
6839 	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
6840 	    fptr->addr[3]);
6841 
6842 	/* Process flash layout table. */
6843 	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
6844 	    QL_SUCCESS) {
6845 		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
6846 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6847 		return;
6848 	}
6849 	fhdr = (ql_flt_hdr_t *)bp;
6850 
6851 	/* Verify flash layout table. */
6852 	len = (uint16_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
6853 	    sizeof (ql_flt_hdr_t));
6854 	if (len > FLASH_LAYOUT_TABLE_SIZE) {
6855 		chksum = 0xffff;
6856 	} else {
6857 		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6858 			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6859 			chksum += w16;
6860 		}
6861 	}
6862 	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
6863 	if (chksum != 0 || w16 != 1) {
6864 		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
6865 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6866 		return;
6867 	}
6868 
6869 	/* Process flash layout table regions */
6870 	eaddr = bp + sizeof (ql_flt_hdr_t) + len;
6871 	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
6872 	    (uint8_t *)frgn < eaddr; frgn++) {
6873 		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
6874 		    frgn->beg_addr[2], frgn->beg_addr[3]);
6875 		faddr >>= 2;
6876 
6877 		switch (frgn->region) {
6878 		case FLASH_FW_REGION:
6879 			ha->flash_fw_addr = faddr;
6880 			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh\n",
6881 			    ha->instance, faddr);
6882 			break;
6883 		case FLASH_GOLDEN_FW_REGION:
6884 			ha->flash_golden_fw_addr = faddr;
6885 			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
6886 			    ha->instance, faddr);
6887 			break;
6888 		case FLASH_VPD_0_REGION:
6889 			if (!(ha->flags & FUNCTION_1)) {
6890 				ha->flash_vpd_addr = faddr;
6891 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
6892 				    "\n", ha->instance, faddr);
6893 			}
6894 			break;
6895 		case FLASH_NVRAM_0_REGION:
6896 			if (!(ha->flags & FUNCTION_1)) {
6897 				ha->flash_nvram_addr = faddr;
6898 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
6899 				    "%xh\n", ha->instance, faddr);
6900 			}
6901 			break;
6902 		case FLASH_VPD_1_REGION:
6903 			if (ha->flags & FUNCTION_1) {
6904 				ha->flash_vpd_addr = faddr;
6905 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
6906 				    "\n", ha->instance, faddr);
6907 			}
6908 			break;
6909 		case FLASH_NVRAM_1_REGION:
6910 			if (ha->flags & FUNCTION_1) {
6911 				ha->flash_nvram_addr = faddr;
6912 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
6913 				    "%xh\n", ha->instance, faddr);
6914 			}
6915 			break;
6916 		case FLASH_DESC_TABLE_REGION:
6917 			ha->flash_desc_addr = faddr;
6918 			QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr=%xh\n",
6919 			    ha->instance, faddr);
6920 			break;
6921 		case FLASH_ERROR_LOG_0_REGION:
6922 			if (!(ha->flags & FUNCTION_1)) {
6923 				ha->flash_errlog_start = faddr;
6924 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
6925 				    "%xh\n", ha->instance, faddr);
6926 			}
6927 			break;
6928 		case FLASH_ERROR_LOG_1_REGION:
6929 			if (ha->flags & FUNCTION_1) {
6930 				ha->flash_errlog_start = faddr;
6931 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
6932 				    "%xh\n", ha->instance, faddr);
6933 			}
6934 			break;
6935 		default:
6936 			break;
6937 		}
6938 	}
6939 	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6940 
6941 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6942 }
6943 
6944 /*
6945  * ql_flash_nvram_defaults
6946  *	Sets the default flash and NVRAM region addresses for the adapter.
6947  *
6948  * Input:
6949  *	ha:		adapter state pointer.
6950  *
6954  * Context:
6955  *	Kernel context.
6956  */
6957 static void
6958 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
6959 {
6960 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6961 
6962 	if (ha->flags & FUNCTION_1) {
6963 		if (CFG_IST(ha, CFG_CTRL_2300)) {
6964 			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
6965 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
6966 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
6967 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6968 			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
6969 			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
6970 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
6971 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
6972 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
6973 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
6974 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6975 			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
6976 			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
6977 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
6978 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
6979 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
6980 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
6981 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
6982 			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
6983 			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
6984 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
6985 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
6986 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
6987 		} else {
6988 			EL(ha, "unassigned flash fn1 addr: %x\n",
6989 			    ha->device_id);
6990 		}
6991 	} else {
6992 		if (CFG_IST(ha, CFG_CTRL_2200)) {
6993 			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
6994 			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
6995 		} else if (CFG_IST(ha, CFG_CTRL_2300) ||
6996 		    (CFG_IST(ha, CFG_CTRL_6322))) {
6997 			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
6998 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
6999 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
7000 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7001 			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
7002 			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
7003 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
7004 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7005 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7006 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7007 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7008 			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
7009 			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7010 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7011 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7012 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7013 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7014 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7015 			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7016 			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7017 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7018 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7019 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7020 		} else {
7021 			EL(ha, "unassigned flash fn0 addr: %x\n",
7022 			    ha->device_id);
7023 		}
7024 	}
7025 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7026 }
7027 
7028 /*
7029  * ql_get_sfp
7030  *	Returns sfp data to sdmapi caller
7031  *
7032  * Input:
7033  *	ha:	adapter state pointer.
7034  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7035  *	mode:	flags.
7036  *
7037  * Returns:
7038  *	None, request status indicated in cmd->Status.
7039  *
7040  * Context:
7041  *	Kernel context.
7042  */
7043 static void
7044 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7045 {
7046 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7047 
7048 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
7049 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7050 		EL(ha, "failed, invalid request for HBA\n");
7051 		return;
7052 	}
7053 
7054 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7055 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7056 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7057 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7058 		    cmd->ResponseLen);
7059 		return;
7060 	}
7061 
7062 	/* Dump SFP data in user buffer */
7063 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7064 	    mode)) != 0) {
7065 		cmd->Status = EXT_STATUS_COPY_ERR;
7066 		EL(ha, "failed, copy error\n");
7067 	} else {
7068 		cmd->Status = EXT_STATUS_OK;
7069 	}
7070 
7071 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7072 }
7073 
7074 /*
7075  * ql_dump_sfp
7076  *	Dumps SFP.
7077  *
7078  * Input:
7079  *	ha:	adapter state pointer.
7080  *	bp:	buffer address.
7081  *	mode:	flags
7082  *
7083  * Returns:
7084  *
7085  * Context:
7086  *	Kernel context.
7087  */
7088 static int
7089 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7090 {
7091 	dma_mem_t	mem;
7092 	uint32_t	cnt;
7093 	int		rval2, rval = 0;
7094 	uint32_t	dxfer;
7095 
7096 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7097 
7098 	/* Get memory for SFP. */
7099 
7100 	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7101 	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7102 		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7103 		return (ENOMEM);
7104 	}
7105 
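	/*
	 * Read the SFP in mem.size chunks: the first 256 bytes from two-wire
	 * device address 0xA0 and the remainder from 0xA2, presumably
	 * matching the SFF-8472 A0h/A2h page layout.
	 */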
7106 	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7107 		rval2 = ql_read_sfp(ha, &mem,
7108 		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7109 		    (uint16_t)(cnt & 0xff));
7110 		if (rval2 != QL_SUCCESS) {
7111 			EL(ha, "failed, read_sfp=%xh\n", rval2);
7112 			rval = EFAULT;
7113 			break;
7114 		}
7115 
7116 		/* copy the data back */
7117 		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7118 		    mode)) != mem.size) {
7119 			/* ddi copy error */
7120 			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7121 			rval = EFAULT;
7122 			break;
7123 		}
7124 
7125 		/* adjust the buffer pointer */
7126 		bp = (caddr_t)bp + mem.size;
7127 	}
7128 
7129 	ql_free_phys(ha, &mem);
7130 
7131 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7132 
7133 	return (rval);
7134 }
7135 
7136 /*
7137  * ql_port_param
7138  *	Retrieves or sets the firmware port speed settings
7139  *
7140  * Input:
7141  *	ha:	adapter state pointer.
7142  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7143  *	mode:	flags.
7144  *
7145  * Returns:
7146  *	None, request status indicated in cmd->Status.
7147  *
7148  * Context:
7149  *	Kernel context.
7150  *
7151  */
7152 static void
7153 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7154 {
7155 	uint8_t			*name;
7156 	ql_tgt_t		*tq;
7157 	EXT_PORT_PARAM		port_param = {0};
7158 	uint32_t		rval = QL_SUCCESS;
7159 	uint32_t		idma_rate;
7160 
7161 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7162 
7163 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7164 		EL(ha, "invalid request for this HBA\n");
7165 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7166 		cmd->ResponseLen = 0;
7167 		return;
7168 	}
7169 
7170 	if (LOOP_NOT_READY(ha)) {
7171 		EL(ha, "failed, loop not ready\n");
7172 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7173 		cmd->ResponseLen = 0;
7174 		return;
7175 	}
7176 
7177 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7178 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7179 		EL(ha, "failed, ddi_copyin\n");
7180 		cmd->Status = EXT_STATUS_COPY_ERR;
7181 		cmd->ResponseLen = 0;
7182 		return;
7183 	}
7184 
7185 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7186 		EL(ha, "Unsupported dest lookup type: %xh\n",
7187 		    port_param.FCScsiAddr.DestType);
7188 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7189 		cmd->ResponseLen = 0;
7190 		return;
7191 	}
7192 
7193 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7194 
7195 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7196 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
7197 	    name[5], name[6], name[7]);
7198 
7199 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7200 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7201 		EL(ha, "failed, fc_port not found\n");
7202 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7203 		cmd->ResponseLen = 0;
7204 		return;
7205 	}
7206 
7207 	cmd->Status = EXT_STATUS_OK;
7208 	cmd->DetailStatus = EXT_STATUS_OK;
7209 
7210 	switch (port_param.Mode) {
7211 	case EXT_IIDMA_MODE_GET:
7212 		/*
7213 		 * Report the firmware's port rate for the wwpn
7214 		 */
7215 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7216 		    port_param.Mode);
7217 
7218 		if (rval != QL_SUCCESS) {
7219 			EL(ha, "iidma get failed: %xh\n", rval);
7220 			cmd->Status = EXT_STATUS_MAILBOX;
7221 			cmd->DetailStatus = rval;
7222 			cmd->ResponseLen = 0;
7223 		} else {
7224 			switch (idma_rate) {
7225 			case IIDMA_RATE_1GB:
7226 				port_param.Speed =
7227 				    EXT_DEF_PORTSPEED_1GBIT;
7228 				break;
7229 			case IIDMA_RATE_2GB:
7230 				port_param.Speed =
7231 				    EXT_DEF_PORTSPEED_2GBIT;
7232 				break;
7233 			case IIDMA_RATE_4GB:
7234 				port_param.Speed =
7235 				    EXT_DEF_PORTSPEED_4GBIT;
7236 				break;
7237 			case IIDMA_RATE_8GB:
7238 				port_param.Speed =
7239 				    EXT_DEF_PORTSPEED_8GBIT;
7240 				break;
7241 			case IIDMA_RATE_10GB:
7242 				port_param.Speed =
7243 				    EXT_DEF_PORTSPEED_10GBIT;
7244 				break;
7245 			default:
7246 				port_param.Speed =
7247 				    EXT_DEF_PORTSPEED_UNKNOWN;
7248 				EL(ha, "failed, Port speed rate=%xh\n",
7249 				    idma_rate);
7250 				break;
7251 			}
7252 
7253 			/* Copy back the data */
7254 			rval = ddi_copyout((void *)&port_param,
7255 			    (void *)(uintptr_t)cmd->ResponseAdr,
7256 			    sizeof (EXT_PORT_PARAM), mode);
7257 
7258 			if (rval != 0) {
7259 				cmd->Status = EXT_STATUS_COPY_ERR;
7260 				cmd->ResponseLen = 0;
7261 				EL(ha, "failed, ddi_copyout\n");
7262 			} else {
7263 				cmd->ResponseLen = (uint32_t)
7264 				    sizeof (EXT_PORT_PARAM);
7265 			}
7266 		}
7267 		break;
7268 
7269 	case EXT_IIDMA_MODE_SET:
7270 		/*
7271 		 * Set the firmware's port rate for the wwpn
7272 		 */
7273 		switch (port_param.Speed) {
7274 		case EXT_DEF_PORTSPEED_1GBIT:
7275 			idma_rate = IIDMA_RATE_1GB;
7276 			break;
7277 		case EXT_DEF_PORTSPEED_2GBIT:
7278 			idma_rate = IIDMA_RATE_2GB;
7279 			break;
7280 		case EXT_DEF_PORTSPEED_4GBIT:
7281 			idma_rate = IIDMA_RATE_4GB;
7282 			break;
7283 		case EXT_DEF_PORTSPEED_8GBIT:
7284 			idma_rate = IIDMA_RATE_8GB;
7285 			break;
7286 		case EXT_DEF_PORTSPEED_10GBIT:
7287 			idma_rate = IIDMA_RATE_10GB;
7288 			break;
7289 		default:
7290 			EL(ha, "invalid set iidma rate: %x\n",
7291 			    port_param.Speed);
7292 			cmd->Status = EXT_STATUS_INVALID_PARAM;
7293 			cmd->ResponseLen = 0;
7294 			rval = QL_PARAMETER_ERROR;
7295 			break;
7296 		}
7297 
7298 		if (rval == QL_SUCCESS) {
7299 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7300 			    port_param.Mode);
7301 			if (rval != QL_SUCCESS) {
7302 				EL(ha, "iidma set failed: %xh\n", rval);
7303 				cmd->Status = EXT_STATUS_MAILBOX;
7304 				cmd->DetailStatus = rval;
7305 				cmd->ResponseLen = 0;
7306 			}
7307 		}
7308 		break;
7309 	default:
7310 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7311 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7312 		cmd->ResponseLen = 0;
7313 		cmd->DetailStatus = 0;
7314 		break;
7315 	}
7316 
7317 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7318 }
7319 
7320 /*
7321  * ql_get_fwexttrace
7322  *	Dumps f/w extended trace buffer
7323  *
7324  * Input:
7325  *	ha:	adapter state pointer.
7326  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7327  *	mode:	flags.
7328  *
7329  * Returns:
7330  *
7331  * Context:
7332  *	Kernel context.
7333  */
7334 /* ARGSUSED */
7335 static void
7336 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7337 {
7338 	int	rval;
7339 	caddr_t	payload;
7340 
7341 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7342 
7343 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7344 		EL(ha, "invalid request for this HBA\n");
7345 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7346 		cmd->ResponseLen = 0;
7347 		return;
7348 	}
7349 
7350 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7351 	    (ha->fwexttracebuf.bp == NULL)) {
7352 		EL(ha, "f/w extended trace is not enabled\n");
7353 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7354 		cmd->ResponseLen = 0;
7355 		return;
7356 	}
7357 
7358 	if (cmd->ResponseLen < FWEXTSIZE) {
7359 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7360 		cmd->DetailStatus = FWEXTSIZE;
7361 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7362 		    cmd->ResponseLen, FWEXTSIZE);
7363 		cmd->ResponseLen = 0;
7364 		return;
7365 	}
7366 
7367 	/* Time Stamp */
7368 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7369 	if (rval != QL_SUCCESS) {
7370 		EL(ha, "f/w extended trace insert "
7371 		    "time stamp failed: %xh\n", rval);
7372 		cmd->Status = EXT_STATUS_ERR;
7373 		cmd->ResponseLen = 0;
7374 		return;
7375 	}
7376 
7377 	/* Disable Tracing */
7378 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7379 	if (rval != QL_SUCCESS) {
7380 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7381 		cmd->Status = EXT_STATUS_ERR;
7382 		cmd->ResponseLen = 0;
7383 		return;
7384 	}
7385 
7386 	/* Allocate payload buffer */
7387 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7388 	if (payload == NULL) {
7389 		EL(ha, "failed, kmem_zalloc\n");
7390 		cmd->Status = EXT_STATUS_NO_MEMORY;
7391 		cmd->ResponseLen = 0;
7392 		return;
7393 	}
7394 
7395 	/* Sync DMA buffer. */
7396 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7397 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7398 
7399 	/* Copy trace buffer data. */
7400 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7401 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7402 	    DDI_DEV_AUTOINCR);
7403 
7404 	/* Send payload to application. */
7405 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7406 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7407 		EL(ha, "failed, send_buffer_data\n");
7408 		cmd->Status = EXT_STATUS_COPY_ERR;
7409 		cmd->ResponseLen = 0;
7410 	} else {
7411 		cmd->Status = EXT_STATUS_OK;
7412 	}
7413 
7414 	kmem_free(payload, FWEXTSIZE);
7415 
7416 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7417 }
7418 
7419 /*
7420  * ql_get_fwfcetrace
7421  *	Dumps f/w fibre channel event trace buffer
7422  *
7423  * Input:
7424  *	ha:	adapter state pointer.
7425  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7426  *	mode:	flags.
7427  *
7428  * Returns:
7429  *
7430  * Context:
7431  *	Kernel context.
7432  */
7433 /* ARGSUSED */
7434 static void
7435 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7436 {
7437 	int	rval;
7438 	caddr_t	payload;
7439 
7440 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7441 
7442 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7443 		EL(ha, "invalid request for this HBA\n");
7444 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7445 		cmd->ResponseLen = 0;
7446 		return;
7447 	}
7448 
7449 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7450 	    (ha->fwfcetracebuf.bp == NULL)) {
7451 		EL(ha, "f/w FCE trace is not enabled\n");
7452 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7453 		cmd->ResponseLen = 0;
7454 		return;
7455 	}
7456 
7457 	if (cmd->ResponseLen < FWFCESIZE) {
7458 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7459 		cmd->DetailStatus = FWFCESIZE;
7460 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7461 		    cmd->ResponseLen, FWFCESIZE);
7462 		cmd->ResponseLen = 0;
7463 		return;
7464 	}
7465 
7466 	/* Disable Tracing */
7467 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7468 	if (rval != QL_SUCCESS) {
7469 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7470 		cmd->Status = EXT_STATUS_ERR;
7471 		cmd->ResponseLen = 0;
7472 		return;
7473 	}
7474 
7475 	/* Allocate payload buffer */
	payload = kmem_zalloc(FWFCESIZE, KM_SLEEP);
7477 	if (payload == NULL) {
7478 		EL(ha, "failed, kmem_zalloc\n");
7479 		cmd->Status = EXT_STATUS_NO_MEMORY;
7480 		cmd->ResponseLen = 0;
7481 		return;
7482 	}
7483 
7484 	/* Sync DMA buffer. */
7485 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7486 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7487 
7488 	/* Copy trace buffer data. */
7489 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7490 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7491 	    DDI_DEV_AUTOINCR);
7492 
7493 	/* Send payload to application. */
7494 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7495 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7496 		EL(ha, "failed, send_buffer_data\n");
7497 		cmd->Status = EXT_STATUS_COPY_ERR;
7498 		cmd->ResponseLen = 0;
7499 	} else {
7500 		cmd->Status = EXT_STATUS_OK;
7501 	}
7502 
7503 	kmem_free(payload, FWFCESIZE);
7504 
7505 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7506 }
7507 
7508 /*
7509  * ql_get_pci_data
7510  *	Retrieves pci config space data
7511  *
7512  * Input:
7513  *	ha:	adapter state pointer.
7514  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7515  *	mode:	flags.
7516  *
7517  * Returns:
7518  *	None, request status indicated in cmd->Status.
7519  *
7520  * Context:
7521  *	Kernel context.
7522  *
7523  */
7524 static void
7525 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7526 {
7527 	uint8_t		cap_ptr;
7528 	uint8_t		cap_id;
7529 	uint32_t	buf_size = 256;
7530 
7531 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7532 
7533 	/*
7534 	 * First check the "Capabilities List" bit of the status register.
7535 	 */
7536 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7537 		/*
7538 		 * Now get the capability pointer
7539 		 */
7540 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7541 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7542 			/*
7543 			 * Check for the pcie capability.
7544 			 */
7545 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7546 			if (cap_id == PCI_CAP_ID_PCI_E) {
7547 				buf_size = 4096;
7548 				break;
7549 			}
7550 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7551 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7552 		}
7553 	}
7554 
7555 	if (cmd->ResponseLen < buf_size) {
7556 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7557 		cmd->DetailStatus = buf_size;
7558 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7559 		    cmd->ResponseLen);
7560 		return;
7561 	}
7562 
7563 	/* Dump PCI config data. */
7564 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7565 	    buf_size, mode)) != 0) {
7566 		cmd->Status = EXT_STATUS_COPY_ERR;
7567 		cmd->DetailStatus = 0;
7568 		EL(ha, "failed, copy err pci_dump\n");
7569 	} else {
7570 		cmd->Status = EXT_STATUS_OK;
7571 		cmd->DetailStatus = buf_size;
7572 	}
7573 
7574 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7575 }
7576 
7577 /*
7578  * ql_pci_dump
7579  *	Dumps PCI config data to application buffer.
7580  *
7581  * Input:
 *	ha:		adapter state pointer.
 *	bp:		user buffer address.
 *	pci_size:	number of bytes of config space to dump.
 *	mode:		flags.
 *
 * Returns:
 *	0 = success, ENOMEM = memory alloc error, EFAULT = copyout error.
 *
7587  * Context:
7588  *	Kernel context.
7589  */
7590 int
7591 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7592 {
7593 	uint32_t	pci_os;
7594 	uint32_t	*ptr32, *org_ptr32;
7595 
7596 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7597 
7598 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7599 	if (ptr32 == NULL) {
7600 		EL(ha, "failed kmem_zalloc\n");
7601 		return (ENOMEM);
7602 	}
7603 
7604 	/* store the initial value of ptr32 */
7605 	org_ptr32 = ptr32;
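	/*
	 * Stage the entire configuration space in kernel memory, reading
	 * 32 bits at a time and converting each word to little-endian so
	 * that the single ddi_copyout() below hands the application a
	 * little-endian image.
	 */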
7606 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7607 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7608 		LITTLE_ENDIAN_32(ptr32);
7609 		ptr32++;
7610 	}
7611 
7612 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7613 	    0) {
7614 		EL(ha, "failed ddi_copyout\n");
7615 		kmem_free(org_ptr32, pci_size);
7616 		return (EFAULT);
7617 	}
7618 
7619 	QL_DUMP_9(org_ptr32, 8, pci_size);
7620 
7621 	kmem_free(org_ptr32, pci_size);
7622 
7623 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7624 
7625 	return (0);
7626 }
7627 
7628 /*
7629  * ql_menlo_reset
7630  *	Reset Menlo
7631  *
7632  * Input:
7633  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
7639  * Context:
7640  *	Kernel context.
7641  */
7642 static void
7643 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7644 {
7645 	EXT_MENLO_RESET	rst;
7646 	ql_mbx_data_t	mr;
7647 	int		rval;
7648 
7649 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7650 
7651 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7652 		EL(ha, "failed, invalid request for HBA\n");
7653 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7654 		cmd->ResponseLen = 0;
7655 		return;
7656 	}
7657 
7658 	/*
7659 	 * TODO: only vp_index 0 can do this (?)
7660 	 */
7661 
7662 	/*  Verify the size of request structure. */
7663 	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
7664 		/* Return error */
7665 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7666 		    sizeof (EXT_MENLO_RESET));
7667 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7668 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7669 		cmd->ResponseLen = 0;
7670 		return;
7671 	}
7672 
7673 	/* Get reset request. */
7674 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7675 	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
7676 		EL(ha, "failed, ddi_copyin\n");
7677 		cmd->Status = EXT_STATUS_COPY_ERR;
7678 		cmd->ResponseLen = 0;
7679 		return;
7680 	}
7681 
7682 	/* Wait for I/O to stop and daemon to stall. */
7683 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7684 		EL(ha, "ql_stall_driver failed\n");
7685 		ql_restart_hba(ha);
7686 		cmd->Status = EXT_STATUS_BUSY;
7687 		cmd->ResponseLen = 0;
7688 		return;
7689 	}
7690 
7691 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
7692 	if (rval != QL_SUCCESS) {
7693 		EL(ha, "failed, status=%xh\n", rval);
7694 		cmd->Status = EXT_STATUS_MAILBOX;
7695 		cmd->DetailStatus = rval;
7696 		cmd->ResponseLen = 0;
7697 	} else if (mr.mb[1] != 0) {
7698 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
7699 		cmd->Status = EXT_STATUS_ERR;
7700 		cmd->DetailStatus = mr.mb[1];
7701 		cmd->ResponseLen = 0;
7702 	}
7703 
7704 	ql_restart_hba(ha);
7705 
7706 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7707 }
7708 
7709 /*
7710  * ql_menlo_get_fw_version
7711  *	Get Menlo firmware version.
7712  *
7713  * Input:
7714  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
7720  * Context:
7721  *	Kernel context.
7722  */
7723 static void
7724 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7725 {
7726 	int				rval;
7727 	ql_mbx_iocb_t			*pkt;
7728 	EXT_MENLO_GET_FW_VERSION	ver = {0};
7729 
7730 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7731 
7732 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7733 		EL(ha, "failed, invalid request for HBA\n");
7734 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7735 		cmd->ResponseLen = 0;
7736 		return;
7737 	}
7738 
7739 	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
7740 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7741 		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
7742 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
7743 		    sizeof (EXT_MENLO_GET_FW_VERSION));
7744 		cmd->ResponseLen = 0;
7745 		return;
7746 	}
7747 
7748 	/* Allocate packet. */
7749 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7750 	if (pkt == NULL) {
7751 		EL(ha, "failed, kmem_zalloc\n");
7752 		cmd->Status = EXT_STATUS_NO_MEMORY;
7753 		cmd->ResponseLen = 0;
7754 		return;
7755 	}
7756 
7757 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7758 	pkt->mvfy.entry_count = 1;
7759 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
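
	/*
	 * Issue the verify IOCB with the "do not update firmware" option
	 * so the adapter only reports the current Menlo firmware version.
	 */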
7760 
7761 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7762 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7763 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7764 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
7765 
7766 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7767 	    pkt->mvfy.options_status != CS_COMPLETE) {
7768 		/* Command error */
7769 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7770 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7771 		    pkt->mvfy.failure_code);
7772 		cmd->Status = EXT_STATUS_ERR;
7773 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7774 		    QL_FUNCTION_FAILED;
7775 		cmd->ResponseLen = 0;
7776 	} else if (ddi_copyout((void *)&ver,
7777 	    (void *)(uintptr_t)cmd->ResponseAdr,
7778 	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
7779 		EL(ha, "failed, ddi_copyout\n");
7780 		cmd->Status = EXT_STATUS_COPY_ERR;
7781 		cmd->ResponseLen = 0;
7782 	} else {
7783 		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
7784 	}
7785 
7786 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7787 
7788 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7789 }
7790 
7791 /*
7792  * ql_menlo_update_fw
 *	Updates Menlo firmware.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
7802  * Context:
7803  *	Kernel context.
7804  */
7805 static void
7806 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7807 {
7808 	ql_mbx_iocb_t		*pkt;
7809 	dma_mem_t		*dma_mem;
7810 	EXT_MENLO_UPDATE_FW	fw;
7811 	uint32_t		*ptr32;
7812 	int			rval;
7813 
7814 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7815 
7816 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7817 		EL(ha, "failed, invalid request for HBA\n");
7818 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7819 		cmd->ResponseLen = 0;
7820 		return;
7821 	}
7822 
7823 	/*
7824 	 * TODO: only vp_index 0 can do this (?)
7825 	 */
7826 
7827 	/*  Verify the size of request structure. */
7828 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
7829 		/* Return error */
7830 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7831 		    sizeof (EXT_MENLO_UPDATE_FW));
7832 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7833 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7834 		cmd->ResponseLen = 0;
7835 		return;
7836 	}
7837 
7838 	/* Get update fw request. */
7839 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
7840 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
7841 		EL(ha, "failed, ddi_copyin\n");
7842 		cmd->Status = EXT_STATUS_COPY_ERR;
7843 		cmd->ResponseLen = 0;
7844 		return;
7845 	}
7846 
7847 	/* Wait for I/O to stop and daemon to stall. */
7848 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7849 		EL(ha, "ql_stall_driver failed\n");
7850 		ql_restart_hba(ha);
7851 		cmd->Status = EXT_STATUS_BUSY;
7852 		cmd->ResponseLen = 0;
7853 		return;
7854 	}
7855 
7856 	/* Allocate packet. */
7857 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
7858 	if (dma_mem == NULL) {
7859 		EL(ha, "failed, kmem_zalloc\n");
7860 		cmd->Status = EXT_STATUS_NO_MEMORY;
7861 		cmd->ResponseLen = 0;
7862 		return;
7863 	}
7864 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7865 	if (pkt == NULL) {
7866 		EL(ha, "failed, kmem_zalloc\n");
7867 		kmem_free(dma_mem, sizeof (dma_mem_t));
7868 		ql_restart_hba(ha);
7869 		cmd->Status = EXT_STATUS_NO_MEMORY;
7870 		cmd->ResponseLen = 0;
7871 		return;
7872 	}
7873 
7874 	/* Get DMA memory for the IOCB */
7875 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
7876 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
7877 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7878 		    "alloc failed", QL_NAME, ha->instance);
7879 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7880 		kmem_free(dma_mem, sizeof (dma_mem_t));
7881 		ql_restart_hba(ha);
7882 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7883 		cmd->ResponseLen = 0;
7884 		return;
7885 	}
7886 
7887 	/* Get firmware data. */
7888 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
7889 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
7890 		EL(ha, "failed, get_buffer_data\n");
7891 		ql_free_dma_resource(ha, dma_mem);
7892 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7893 		kmem_free(dma_mem, sizeof (dma_mem_t));
7894 		ql_restart_hba(ha);
7895 		cmd->Status = EXT_STATUS_COPY_ERR;
7896 		cmd->ResponseLen = 0;
7897 		return;
7898 	}
7899 
7900 	/* Sync DMA buffer. */
7901 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7902 	    DDI_DMA_SYNC_FORDEV);
7903 
7904 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7905 	pkt->mvfy.entry_count = 1;
7906 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
7907 	ptr32 = dma_mem->bp;
7908 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
7909 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
7910 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
7911 	pkt->mvfy.dseg_count = LE_16(1);
7912 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
7913 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7914 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
7915 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7916 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
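
	/*
	 * The verify/update IOCB references the staged firmware image
	 * through a single data segment; the 64-bit DMA address is split
	 * into its low and high 32-bit halves above.
	 */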
7917 
7918 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7919 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7920 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7921 
7922 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7923 	    pkt->mvfy.options_status != CS_COMPLETE) {
7924 		/* Command error */
7925 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7926 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7927 		    pkt->mvfy.failure_code);
7928 		cmd->Status = EXT_STATUS_ERR;
7929 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7930 		    QL_FUNCTION_FAILED;
7931 		cmd->ResponseLen = 0;
7932 	}
7933 
7934 	ql_free_dma_resource(ha, dma_mem);
7935 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7936 	kmem_free(dma_mem, sizeof (dma_mem_t));
7937 	ql_restart_hba(ha);
7938 
7939 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7940 }
7941 
7942 /*
7943  * ql_menlo_manage_info
 *	Performs Menlo management operations (memory read/write,
 *	configuration change, information query).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
7953  * Context:
7954  *	Kernel context.
7955  */
7956 static void
7957 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7958 {
7959 	ql_mbx_iocb_t		*pkt;
7960 	dma_mem_t		*dma_mem = NULL;
7961 	EXT_MENLO_MANAGE_INFO	info;
7962 	int			rval;
7963 
7964 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7965 
7966 
7967 	/* The call is only supported for Schultz right now */
7968 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
7969 		ql_get_xgmac_statistics(ha, cmd, mode);
7970 		QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n",
7971 		    ha->instance);
7972 		return;
7973 	}
7974 
7975 	if (!CFG_IST(ha, CFG_CTRL_81XX) || !CFG_IST(ha, CFG_CTRL_MENLO)) {
7976 		EL(ha, "failed, invalid request for HBA\n");
7977 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7978 		cmd->ResponseLen = 0;
7979 		return;
7980 	}
7981 
7982 	/*  Verify the size of request structure. */
7983 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
7984 		/* Return error */
7985 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7986 		    sizeof (EXT_MENLO_MANAGE_INFO));
7987 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7988 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7989 		cmd->ResponseLen = 0;
7990 		return;
7991 	}
7992 
7993 	/* Get manage info request. */
7994 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
7995 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
7996 		EL(ha, "failed, ddi_copyin\n");
7997 		cmd->Status = EXT_STATUS_COPY_ERR;
7998 		cmd->ResponseLen = 0;
7999 		return;
8000 	}
8001 
8002 	/* Allocate packet. */
8003 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8004 	if (pkt == NULL) {
8005 		EL(ha, "failed, kmem_zalloc\n");
8006 		ql_restart_driver(ha);
8007 		cmd->Status = EXT_STATUS_NO_MEMORY;
8008 		cmd->ResponseLen = 0;
8009 		return;
8010 	}
8011 
8012 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
8013 	pkt->mdata.entry_count = 1;
8014 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
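
	/*
	 * The remaining IOCB fields depend on the requested operation:
	 * memory reads and writes stage their data through a DMA buffer,
	 * while configuration changes and info queries pass their
	 * parameters inline.
	 */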
8015 
8016 	/* Get DMA memory for the IOCB */
8017 	if (info.Operation == MENLO_OP_READ_MEM ||
8018 	    info.Operation == MENLO_OP_WRITE_MEM) {
8019 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
8020 		pkt->mdata.parameter_1 =
8021 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8022 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8023 		    KM_SLEEP);
8024 		if (dma_mem == NULL) {
8025 			EL(ha, "failed, kmem_zalloc\n");
8026 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8027 			cmd->Status = EXT_STATUS_NO_MEMORY;
8028 			cmd->ResponseLen = 0;
8029 			return;
8030 		}
8031 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8032 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8033 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8034 			    "alloc failed", QL_NAME, ha->instance);
8035 			kmem_free(dma_mem, sizeof (dma_mem_t));
8036 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8037 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8038 			cmd->ResponseLen = 0;
8039 			return;
8040 		}
8041 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8042 			/* Get data. */
8043 			if (ql_get_buffer_data(
8044 			    (caddr_t)(uintptr_t)info.pDataBytes,
8045 			    dma_mem->bp, info.TotalByteCount, mode) !=
8046 			    info.TotalByteCount) {
8047 				EL(ha, "failed, get_buffer_data\n");
8048 				ql_free_dma_resource(ha, dma_mem);
8049 				kmem_free(dma_mem, sizeof (dma_mem_t));
8050 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8051 				cmd->Status = EXT_STATUS_COPY_ERR;
8052 				cmd->ResponseLen = 0;
8053 				return;
8054 			}
8055 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8056 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8057 		}
8058 		pkt->mdata.dseg_count = LE_16(1);
8059 		pkt->mdata.dseg_0_address[0] = (uint32_t)
8060 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8061 		pkt->mdata.dseg_0_address[1] = (uint32_t)
8062 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8063 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8064 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8065 		pkt->mdata.parameter_1 =
8066 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8067 		pkt->mdata.parameter_2 =
8068 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8069 		pkt->mdata.parameter_3 =
8070 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8071 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8072 		pkt->mdata.parameter_1 =
8073 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8074 		pkt->mdata.parameter_2 =
8075 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8076 	}
8077 
8078 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8079 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8080 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8081 
8082 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8083 	    pkt->mdata.options_status != CS_COMPLETE) {
8084 		/* Command error */
8085 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8086 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8087 		    pkt->mdata.failure_code);
8088 		cmd->Status = EXT_STATUS_ERR;
8089 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8090 		    QL_FUNCTION_FAILED;
8091 		cmd->ResponseLen = 0;
8092 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8093 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8094 		    DDI_DMA_SYNC_FORKERNEL);
8095 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8096 		    dma_mem->bp, info.TotalByteCount, mode) !=
8097 		    info.TotalByteCount) {
8098 			cmd->Status = EXT_STATUS_COPY_ERR;
8099 			cmd->ResponseLen = 0;
8100 		}
8101 	}
8102 
	if (dma_mem != NULL) {
		ql_free_dma_resource(ha, dma_mem);
		kmem_free(dma_mem, sizeof (dma_mem_t));
	}
8105 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8106 
8107 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8108 }
8109 
8110 /*
8111  * ql_suspend_hba
8112  *	Suspends all adapter ports.
8113  *
8114  * Input:
8115  *	ha:		adapter state pointer.
 *	opt:	BIT_0 --> leave driver stalled on exit if failed.
8118  *
8119  * Returns:
8120  *	ql local function return status code.
8121  *
8122  * Context:
8123  *	Kernel context.
8124  */
8125 static int
8126 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8127 {
8128 	ql_adapter_state_t	*ha2;
8129 	ql_link_t		*link;
8130 	int			rval = QL_SUCCESS;
8131 
8132 	/* Quiesce I/O on all adapter ports */
8133 	for (link = ql_hba.first; link != NULL; link = link->next) {
8134 		ha2 = link->base_address;
8135 
8136 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8137 			continue;
8138 		}
8139 
8140 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8141 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8142 			break;
8143 		}
8144 	}
8145 
8146 	return (rval);
8147 }
8148 
8149 /*
8150  * ql_restart_hba
8151  *	Restarts adapter.
8152  *
8153  * Input:
8154  *	ha:	adapter state pointer.
8155  *
8156  * Context:
8157  *	Kernel context.
8158  */
8159 static void
8160 ql_restart_hba(ql_adapter_state_t *ha)
8161 {
8162 	ql_adapter_state_t	*ha2;
8163 	ql_link_t		*link;
8164 
8165 	/* Resume I/O on all adapter ports */
8166 	for (link = ql_hba.first; link != NULL; link = link->next) {
8167 		ha2 = link->base_address;
8168 
8169 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8170 			continue;
8171 		}
8172 
8173 		ql_restart_driver(ha2);
8174 	}
8175 }
8176 
8177 /*
8178  * ql_get_vp_cnt_id
 *	Retrieves virtual port count, IDs, paths and driver instances.
8180  *
8181  * Input:
8182  *	ha:	adapter state pointer.
8183  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8184  *	mode:	flags.
8185  *
8186  * Returns:
8187  *	None, request status indicated in cmd->Status.
8188  *
8189  * Context:
8190  *	Kernel context.
8191  *
8192  */
8193 static void
8194 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8195 {
8196 	ql_adapter_state_t	*vha;
8197 	PEXT_VPORT_ID_CNT	ptmp_vp;
8198 	int			id = 0;
8199 	int			rval;
8200 	char			name[MAXPATHLEN];
8201 
8202 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8203 
8204 	/*
8205 	 * To be backward compatible with older API
8206 	 * check for the size of old EXT_VPORT_ID_CNT
8207 	 */
8208 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8209 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8210 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8211 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8212 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8213 		    cmd->ResponseLen);
8214 		cmd->ResponseLen = 0;
8215 		return;
8216 	}
8217 
8218 	ptmp_vp = (EXT_VPORT_ID_CNT *)
8219 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8220 	if (ptmp_vp == NULL) {
8221 		EL(ha, "failed, kmem_zalloc\n");
8222 		cmd->ResponseLen = 0;
8223 		return;
8224 	}
8225 	vha = ha->vp_next;
8226 	while (vha != NULL) {
8227 		ptmp_vp->VpCnt++;
8228 		ptmp_vp->VpId[id] = vha->vp_index;
8229 		(void) ddi_pathname(vha->dip, name);
8230 		(void) strcpy((char *)ptmp_vp->vp_path[id], name);
8231 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8232 		id++;
8233 		vha = vha->vp_next;
8234 	}
8235 	rval = ddi_copyout((void *)ptmp_vp,
8236 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8237 	    cmd->ResponseLen, mode);
8238 	if (rval != 0) {
8239 		cmd->Status = EXT_STATUS_COPY_ERR;
8240 		cmd->ResponseLen = 0;
8241 		EL(ha, "failed, ddi_copyout\n");
8242 	} else {
8243 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8244 		QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8245 		    ha->instance, ptmp_vp->VpCnt);
8246 	}
8247 
8248 }
8249 
8250 /*
8251  * ql_vp_ioctl
8252  *	Performs all EXT_CC_VPORT_CMD functions.
8253  *
8254  * Input:
8255  *	ha:	adapter state pointer.
8256  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8257  *	mode:	flags.
8258  *
8259  * Returns:
8260  *	None, request status indicated in cmd->Status.
8261  *
8262  * Context:
8263  *	Kernel context.
8264  */
8265 static void
8266 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8267 {
8268 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8269 	    cmd->SubCode);
8270 
8271 	/* case off on command subcode */
8272 	switch (cmd->SubCode) {
8273 	case EXT_VF_SC_VPORT_GETINFO:
8274 		ql_qry_vport(ha, cmd, mode);
8275 		break;
8276 	default:
8277 		/* function not supported. */
8278 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8279 		EL(ha, "failed, Unsupported Subcode=%xh\n",
8280 		    cmd->SubCode);
8281 		break;
8282 	}
8283 
8284 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8285 }
8286 
8287 /*
8288  * ql_qry_vport
8289  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8290  *
8291  * Input:
8292  *	ha:	adapter state pointer.
8293  *	cmd:	EXT_IOCTL cmd struct pointer.
8294  *	mode:	flags.
8295  *
8296  * Returns:
8297  *	None, request status indicated in cmd->Status.
8298  *
8299  * Context:
8300  *	Kernel context.
8301  */
8302 static void
8303 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8304 {
8305 	ql_adapter_state_t	*tmp_vha;
8306 	EXT_VPORT_INFO		tmp_vport = {0};
8307 	int			max_vport;
8308 
8309 	QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8310 
8311 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8312 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8313 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8314 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8315 		    cmd->ResponseLen);
8316 		cmd->ResponseLen = 0;
8317 		return;
8318 	}
8319 
8320 	/* Fill in the vport information. */
8321 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8322 	    EXT_DEF_WWN_NAME_SIZE);
8323 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8324 	    EXT_DEF_WWN_NAME_SIZE);
8325 	tmp_vport.state = vha->state;
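
	/* Count the virtual ports already created on this adapter. */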
8326 
8327 	tmp_vha = vha->pha->vp_next;
8328 	while (tmp_vha != NULL) {
8329 		tmp_vport.used++;
8330 		tmp_vha = tmp_vha->vp_next;
8331 	}
8332 
8333 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8334 	    MAX_25_VIRTUAL_PORTS);
8335 	if (max_vport > tmp_vport.used) {
8336 		tmp_vport.free = max_vport - tmp_vport.used;
8337 	}
8338 
8339 	if (ddi_copyout((void *)&tmp_vport,
8340 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8341 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
8342 		cmd->Status = EXT_STATUS_COPY_ERR;
8343 		cmd->ResponseLen = 0;
8344 		EL(vha, "failed, ddi_copyout\n");
8345 	} else {
8346 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8347 		QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8348 	}
8349 }
8350 
8351 /*
8352  * ql_access_flash
8353  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
8354  *
8355  * Input:
 *	ha:	adapter state pointer.
8357  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8358  *	mode:	flags.
8359  *
8360  * Returns:
8361  *	None, request status indicated in cmd->Status.
8362  *
8363  * Context:
8364  *	Kernel context.
8365  */
8366 static void
8367 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8368 {
8369 	int	rval;
8370 
8371 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8372 
8373 	switch (cmd->SubCode) {
8374 	case EXT_SC_FLASH_READ:
8375 		if ((rval = ql_flash_fcode_dump(ha,
8376 		    (void *)(uintptr_t)(cmd->ResponseAdr),
8377 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8378 			cmd->Status = EXT_STATUS_COPY_ERR;
8379 			cmd->ResponseLen = 0;
8380 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
8381 		}
8382 		break;
8383 	case EXT_SC_FLASH_WRITE:
8384 		if ((rval = ql_r_m_w_flash(ha,
8385 		    (void *)(uintptr_t)(cmd->RequestAdr),
8386 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8387 		    QL_SUCCESS) {
8388 			cmd->Status = EXT_STATUS_COPY_ERR;
8389 			cmd->ResponseLen = 0;
8390 			EL(ha, "r_m_w_flash status=%xh\n", rval);
8391 		} else {
8392 			/* Reset caches on all adapter instances. */
8393 			ql_update_flash_caches(ha);
8394 		}
8395 		break;
8396 	default:
8397 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8398 		cmd->Status = EXT_STATUS_ERR;
8399 		cmd->ResponseLen = 0;
8400 		break;
8401 	}
8402 
8403 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8404 }
8405 
8406 /*
8407  * ql_reset_cmd
8408  *	Performs all EXT_CC_RESET_FW_OS functions.
8409  *
8410  * Input:
8411  *	ha:	adapter state pointer.
8412  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8413  *
8414  * Returns:
8415  *	None, request status indicated in cmd->Status.
8416  *
8417  * Context:
8418  *	Kernel context.
8419  */
8420 static void
8421 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
8422 {
8423 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8424 
8425 	switch (cmd->SubCode) {
8426 	case EXT_SC_RESET_FC_FW:
8427 		EL(ha, "isp_abort_needed\n");
8428 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
8429 		break;
8430 	case EXT_SC_RESET_MPI_FW:
8431 		if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8432 			EL(ha, "invalid request for HBA\n");
8433 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
8434 			cmd->ResponseLen = 0;
8435 		} else {
8436 			/* Wait for I/O to stop and daemon to stall. */
8437 			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8438 				EL(ha, "ql_suspend_hba failed\n");
8439 				cmd->Status = EXT_STATUS_BUSY;
8440 				cmd->ResponseLen = 0;
8441 			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
8442 				cmd->Status = EXT_STATUS_ERR;
8443 				cmd->ResponseLen = 0;
8444 			}
8445 			ql_restart_hba(ha);
8446 		}
8447 		break;
8448 	default:
8449 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8450 		cmd->Status = EXT_STATUS_ERR;
8451 		cmd->ResponseLen = 0;
8452 		break;
8453 	}
8454 
8455 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8456 }
8457 
8458 /*
8459  * ql_get_dcbx_parameters
8460  *	Get DCBX parameters.
8461  *
8462  * Input:
8463  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
8467 static void
8468 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8469 {
8470 	uint8_t		*tmp_buf;
8471 	int		rval;
8472 
8473 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8474 
8475 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8476 		EL(ha, "invalid request for HBA\n");
8477 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8478 		cmd->ResponseLen = 0;
8479 		return;
8480 	}
8481 
8482 	/* Allocate memory for command. */
8483 	tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
8484 	if (tmp_buf == NULL) {
8485 		EL(ha, "failed, kmem_zalloc\n");
8486 		cmd->Status = EXT_STATUS_NO_MEMORY;
8487 		cmd->ResponseLen = 0;
8488 		return;
8489 	}
8490 	/* Send command */
8491 	rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
8492 	    (caddr_t)tmp_buf);
8493 	if (rval != QL_SUCCESS) {
8494 		/* error */
8495 		EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
8496 		kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8497 		cmd->Status = EXT_STATUS_ERR;
8498 		cmd->ResponseLen = 0;
8499 		return;
8500 	}
8501 
8502 	/* Copy the response */
8503 	if (ql_send_buffer_data((caddr_t)tmp_buf,
8504 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
8505 	    EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8506 		EL(ha, "failed, ddi_copyout\n");
8507 		cmd->Status = EXT_STATUS_COPY_ERR;
8508 		cmd->ResponseLen = 0;
8509 	} else {
8510 		cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8511 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8512 	}
	kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
}
8516 
8517 /*
8518  * ql_qry_cna_port
8519  *	Performs EXT_SC_QUERY_CNA_PORT subfunction.
8520  *
8521  * Input:
8522  *	ha:	adapter state pointer.
8523  *	cmd:	EXT_IOCTL cmd struct pointer.
8524  *	mode:	flags.
8525  *
8526  * Returns:
8527  *	None, request status indicated in cmd->Status.
8528  *
8529  * Context:
8530  *	Kernel context.
8531  */
8532 static void
8533 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8534 {
8535 	EXT_CNA_PORT	cna_port = {0};
8536 
8537 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8538 
8539 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8540 		EL(ha, "invalid request for HBA\n");
8541 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8542 		cmd->ResponseLen = 0;
8543 		return;
8544 	}
8545 
8546 	if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
8547 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8548 		cmd->DetailStatus = sizeof (EXT_CNA_PORT);
8549 		EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
8550 		    cmd->ResponseLen);
8551 		cmd->ResponseLen = 0;
8552 		return;
8553 	}
8554 
8555 	cna_port.VLanId = ha->fcoe_vlan_id;
8556 	cna_port.FabricParam = ha->fabric_params;
8557 	bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
8558 	    EXT_DEF_MAC_ADDRESS_SIZE);
8559 
8560 	if (ddi_copyout((void *)&cna_port,
8561 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8562 	    sizeof (EXT_CNA_PORT), mode) != 0) {
8563 		cmd->Status = EXT_STATUS_COPY_ERR;
8564 		cmd->ResponseLen = 0;
8565 		EL(ha, "failed, ddi_copyout\n");
8566 	} else {
8567 		cmd->ResponseLen = sizeof (EXT_CNA_PORT);
8568 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8569 	}
8570 }
8571 
8572 /*
8573  * ql_get_xgmac_statistics
8574  *	Get XgMac information
8575  *
8576  * Input:
8577  *	ha:	adapter state pointer.
8578  *	cmd:	EXT_IOCTL cmd struct pointer.
8579  *	mode:	flags.
8580  *
8581  * Returns:
8582  *	None, request status indicated in cmd->Status.
8583  *
8584  * Context:
8585  *	Kernel context.
8586  */
8587 static void
8588 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8589 {
8590 	int			rval;
8591 	uint32_t		size;
8592 	int8_t			*tmp_buf;
8593 	EXT_MENLO_MANAGE_INFO	info;
8594 
8595 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8596 
8597 	/*  Verify the size of request structure. */
8598 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8599 		/* Return error */
8600 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8601 		    sizeof (EXT_MENLO_MANAGE_INFO));
8602 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8603 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8604 		cmd->ResponseLen = 0;
8605 		return;
8606 	}
8607 
8608 	/* Get manage info request. */
8609 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8610 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8611 		EL(ha, "failed, ddi_copyin\n");
8612 		cmd->Status = EXT_STATUS_COPY_ERR;
8613 		cmd->ResponseLen = 0;
8614 		return;
8615 	}
8616 
8617 	size = info.TotalByteCount;
8618 	if (!size) {
8619 		/* parameter error */
8620 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8621 		cmd->DetailStatus = 0;
8622 		EL(ha, "failed, size=%xh\n", size);
8623 		cmd->ResponseLen = 0;
8624 		return;
8625 	}
8626 
8627 	/* Allocate memory for command. */
8628 	tmp_buf = kmem_zalloc(size, KM_SLEEP);
8629 	if (tmp_buf == NULL) {
8630 		EL(ha, "failed, kmem_zalloc\n");
8631 		cmd->Status = EXT_STATUS_NO_MEMORY;
8632 		cmd->ResponseLen = 0;
8633 		return;
8634 	}
8635 
8636 	if (!(info.Operation & MENLO_OP_GET_INFO)) {
8637 		EL(ha, "Invalid request for 81XX\n");
8638 		kmem_free(tmp_buf, size);
8639 		cmd->Status = EXT_STATUS_ERR;
8640 		cmd->ResponseLen = 0;
8641 		return;
8642 	}
8643 
8644 	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);
8645 
8646 	if (rval != QL_SUCCESS) {
8647 		/* error */
8648 		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
8649 		kmem_free(tmp_buf, size);
8650 		cmd->Status = EXT_STATUS_ERR;
8651 		cmd->ResponseLen = 0;
8652 		return;
8653 	}
8654 
8655 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
8656 	    size, mode) != size) {
8657 		EL(ha, "failed, ddi_copyout\n");
8658 		cmd->Status = EXT_STATUS_COPY_ERR;
8659 		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = info.TotalByteCount;
	}
8664 	kmem_free(tmp_buf, size);
8665 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8666 }
8667