1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
91 
92 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
94 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
95 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
96     uint8_t);
97 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
99 static int ql_24xx_flash_desc(ql_adapter_state_t *);
100 static int ql_setup_flash(ql_adapter_state_t *);
101 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
102 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
103 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
104     uint32_t, int);
105 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
106     uint8_t);
107 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
110 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
116 static uint32_t ql_setup_led(ql_adapter_state_t *);
117 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
118 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
122 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
123 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
125 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
126 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
127 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
128 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
129 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
130 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
132 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
140 static void ql_restart_hba(ql_adapter_state_t *);
141 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
144 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
145 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
146 static void ql_update_flash_caches(ql_adapter_state_t *);
147 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
149 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
150 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
152 
153 /* ******************************************************************** */
154 /*			External IOCTL support.				*/
155 /* ******************************************************************** */
156 
157 /*
158  * ql_alloc_xioctl_resource
159  *	Allocates resources needed by module code.
160  *
161  * Input:
162  *	ha:		adapter state pointer.
163  *
164  * Returns:
165  *	SYS_ERRNO
166  *
167  * Context:
168  *	Kernel context.
169  */
170 int
171 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
172 {
173 	ql_xioctl_t	*xp;
174 
175 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
176 
177 	if (ha->xioctl != NULL) {
178 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
179 		    ha->instance);
180 		return (0);
181 	}
182 
183 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
184 	if (xp == NULL) {
185 		EL(ha, "failed, kmem_zalloc\n");
186 		return (ENOMEM);
187 	}
188 	ha->xioctl = xp;
189 
190 	/* Allocate AEN tracking buffer */
191 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
192 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
193 	if (xp->aen_tracking_queue == NULL) {
194 		EL(ha, "failed, kmem_zalloc-2\n");
195 		ql_free_xioctl_resource(ha);
196 		return (ENOMEM);
197 	}
198 
199 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
200 
201 	return (0);
202 }
203 
204 /*
205  * ql_free_xioctl_resource
206  *	Frees resources used by module code.
207  *
208  * Input:
209  *	ha:		adapter state pointer.
210  *
211  * Context:
212  *	Kernel context.
213  */
214 void
215 ql_free_xioctl_resource(ql_adapter_state_t *ha)
216 {
217 	ql_xioctl_t	*xp = ha->xioctl;
218 
219 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
220 
221 	if (xp == NULL) {
222 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
223 		return;
224 	}
225 
226 	if (xp->aen_tracking_queue != NULL) {
227 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
228 		    sizeof (EXT_ASYNC_EVENT));
229 		xp->aen_tracking_queue = NULL;
230 	}
231 
232 	kmem_free(xp, sizeof (ql_xioctl_t));
233 	ha->xioctl = NULL;
234 
235 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
236 }
237 
238 /*
239  * ql_xioctl
240  *	External IOCTL processing.
241  *
242  * Input:
243  *	ha:	adapter state pointer.
244  *	cmd:	function to perform
245  *	arg:	data type varies with request
246  *	mode:	flags
247  *	cred_p:	credentials pointer
248  *	rval_p:	pointer to result value
249  *
250  * Returns:
251  *	0:		success
252  *	ENXIO:		No such device or address
253  *	ENOPROTOOPT:	Protocol not available
254  *
255  * Context:
256  *	Kernel context.
257  */
258 /* ARGSUSED */
259 int
260 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
261     cred_t *cred_p, int *rval_p)
262 {
263 	int	rval;
264 
265 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
266 
267 	if (ha->xioctl == NULL) {
268 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
269 		return (ENXIO);
270 	}
271 
272 	switch (cmd) {
273 	case EXT_CC_QUERY:
274 	case EXT_CC_SEND_FCCT_PASSTHRU:
275 	case EXT_CC_REG_AEN:
276 	case EXT_CC_GET_AEN:
277 	case EXT_CC_SEND_SCSI_PASSTHRU:
278 	case EXT_CC_WWPN_TO_SCSIADDR:
279 	case EXT_CC_SEND_ELS_RNID:
280 	case EXT_CC_SET_DATA:
281 	case EXT_CC_GET_DATA:
282 	case EXT_CC_HOST_IDX:
283 	case EXT_CC_READ_NVRAM:
284 	case EXT_CC_UPDATE_NVRAM:
285 	case EXT_CC_READ_OPTION_ROM:
286 	case EXT_CC_READ_OPTION_ROM_EX:
287 	case EXT_CC_UPDATE_OPTION_ROM:
288 	case EXT_CC_UPDATE_OPTION_ROM_EX:
289 	case EXT_CC_GET_VPD:
290 	case EXT_CC_SET_VPD:
291 	case EXT_CC_LOOPBACK:
292 	case EXT_CC_GET_FCACHE:
293 	case EXT_CC_GET_FCACHE_EX:
294 	case EXT_CC_HOST_DRVNAME:
295 	case EXT_CC_GET_SFP_DATA:
296 	case EXT_CC_PORT_PARAM:
297 	case EXT_CC_GET_PCI_DATA:
298 	case EXT_CC_GET_FWEXTTRACE:
299 	case EXT_CC_GET_FWFCETRACE:
300 	case EXT_CC_GET_VP_CNT_ID:
301 	case EXT_CC_VPORT_CMD:
302 	case EXT_CC_ACCESS_FLASH:
303 	case EXT_CC_RESET_FW:
304 	case EXT_CC_MENLO_MANAGE_INFO:
305 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
306 		break;
307 	default:
308 		/* function not supported. */
309 		EL(ha, "function=%d not supported\n", cmd);
310 		rval = ENOPROTOOPT;
311 	}
312 
313 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
314 
315 	return (rval);
316 }
317 
318 /*
319  * ql_sdm_ioctl
320  *	Provides ioctl functions for SAN/Device Management functions
321  *	AKA External Ioctl functions.
322  *
323  * Input:
324  *	ha:		adapter state pointer.
325  *	ioctl_code:	ioctl function to perform
326  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
327  *	mode:		flags
328  *
329  * Returns:
330  *	0:	success
331  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
332  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
333  *		copyout of EXT_IOCTL status info failed.
334  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
335  *	EBUSY:	Device busy
336  *
337  * Context:
338  *	Kernel context.
339  */
340 static int
341 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
342 {
343 	EXT_IOCTL		*cmd;
344 	int			rval;
345 	ql_adapter_state_t	*vha;
346 
347 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
348 
349 	/* Copy argument structure (EXT_IOCTL) from application land. */
350 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
351 	    ql_validate_signature)) != 0) {
352 		/*
353 		 * a non-zero value at this time means a problem getting
354 		 * the requested information from application land, just
355 		 * return the error code and hope for the best.
356 		 */
357 		EL(ha, "failed, sdm_setup\n");
358 		return (rval);
359 	}
360 
361 	/*
362 	 * Map the physical ha ptr (which the ioctl is called with)
363 	 * to the virtual ha that the caller is addressing.
364 	 */
365 	if (ha->flags & VP_ENABLED) {
366 		/* Check that it is within range. */
367 		if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
368 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
369 			EL(ha, "Invalid HbaSelect vp index: %xh\n",
370 			    cmd->HbaSelect);
371 			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
372 			cmd->ResponseLen = 0;
373 			return (rval);
374 		}
375 		/*
376 		 * Special case: HbaSelect == 0 is physical ha
377 		 */
378 		if (cmd->HbaSelect != 0) {
379 			vha = ha->vp_next;
380 			while (vha != NULL) {
381 				if (vha->vp_index == cmd->HbaSelect) {
382 					ha = vha;
383 					break;
384 				}
385 				vha = vha->vp_next;
386 			}
387 			/*
388 			 * The specified vp index may be valid(within range)
389 			 * but it's not in the list. Currently this is all
390 			 * we can say.
391 			 */
392 			if (vha == NULL) {
393 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
394 				cmd->ResponseLen = 0;
395 				return (rval);
396 			}
397 		}
398 	}
399 
400 	/*
401 	 * If driver is suspended, stalled, or powered down rtn BUSY
402 	 */
403 	if (ha->flags & ADAPTER_SUSPENDED ||
404 	    ha->task_daemon_flags & DRIVER_STALL ||
405 	    ha->power_level != PM_LEVEL_D0) {
406 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
407 		    "driver suspended" :
408 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
409 		    "FCA powered down"));
410 		cmd->Status = EXT_STATUS_BUSY;
411 		cmd->ResponseLen = 0;
412 		rval = EBUSY;
413 
414 		/* Return results to caller */
415 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
416 			EL(ha, "failed, sdm_return\n");
417 			rval = EFAULT;
418 		}
419 		return (rval);
420 	}
421 
422 	switch (ioctl_code) {
423 	case EXT_CC_QUERY_OS:
424 		ql_query(ha, cmd, mode);
425 		break;
426 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
427 		ql_fcct(ha, cmd, mode);
428 		break;
429 	case EXT_CC_REG_AEN_OS:
430 		ql_aen_reg(ha, cmd, mode);
431 		break;
432 	case EXT_CC_GET_AEN_OS:
433 		ql_aen_get(ha, cmd, mode);
434 		break;
435 	case EXT_CC_GET_DATA_OS:
436 		ql_get_host_data(ha, cmd, mode);
437 		break;
438 	case EXT_CC_SET_DATA_OS:
439 		ql_set_host_data(ha, cmd, mode);
440 		break;
441 	case EXT_CC_SEND_ELS_RNID_OS:
442 		ql_send_els_rnid(ha, cmd, mode);
443 		break;
444 	case EXT_CC_SCSI_PASSTHRU_OS:
445 		ql_scsi_passthru(ha, cmd, mode);
446 		break;
447 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
448 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
449 		break;
450 	case EXT_CC_HOST_IDX_OS:
451 		ql_host_idx(ha, cmd, mode);
452 		break;
453 	case EXT_CC_HOST_DRVNAME_OS:
454 		ql_host_drvname(ha, cmd, mode);
455 		break;
456 	case EXT_CC_READ_NVRAM_OS:
457 		ql_read_nvram(ha, cmd, mode);
458 		break;
459 	case EXT_CC_UPDATE_NVRAM_OS:
460 		ql_write_nvram(ha, cmd, mode);
461 		break;
462 	case EXT_CC_READ_OPTION_ROM_OS:
463 	case EXT_CC_READ_OPTION_ROM_EX_OS:
464 		ql_read_flash(ha, cmd, mode);
465 		break;
466 	case EXT_CC_UPDATE_OPTION_ROM_OS:
467 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
468 		ql_write_flash(ha, cmd, mode);
469 		break;
470 	case EXT_CC_LOOPBACK_OS:
471 		ql_diagnostic_loopback(ha, cmd, mode);
472 		break;
473 	case EXT_CC_GET_VPD_OS:
474 		ql_read_vpd(ha, cmd, mode);
475 		break;
476 	case EXT_CC_SET_VPD_OS:
477 		ql_write_vpd(ha, cmd, mode);
478 		break;
479 	case EXT_CC_GET_FCACHE_OS:
480 		ql_get_fcache(ha, cmd, mode);
481 		break;
482 	case EXT_CC_GET_FCACHE_EX_OS:
483 		ql_get_fcache_ex(ha, cmd, mode);
484 		break;
485 	case EXT_CC_GET_SFP_DATA_OS:
486 		ql_get_sfp(ha, cmd, mode);
487 		break;
488 	case EXT_CC_PORT_PARAM_OS:
489 		ql_port_param(ha, cmd, mode);
490 		break;
491 	case EXT_CC_GET_PCI_DATA_OS:
492 		ql_get_pci_data(ha, cmd, mode);
493 		break;
494 	case EXT_CC_GET_FWEXTTRACE_OS:
495 		ql_get_fwexttrace(ha, cmd, mode);
496 		break;
497 	case EXT_CC_GET_FWFCETRACE_OS:
498 		ql_get_fwfcetrace(ha, cmd, mode);
499 		break;
500 	case EXT_CC_MENLO_RESET:
501 		ql_menlo_reset(ha, cmd, mode);
502 		break;
503 	case EXT_CC_MENLO_GET_FW_VERSION:
504 		ql_menlo_get_fw_version(ha, cmd, mode);
505 		break;
506 	case EXT_CC_MENLO_UPDATE_FW:
507 		ql_menlo_update_fw(ha, cmd, mode);
508 		break;
509 	case EXT_CC_MENLO_MANAGE_INFO:
510 		ql_menlo_manage_info(ha, cmd, mode);
511 		break;
512 	case EXT_CC_GET_VP_CNT_ID_OS:
513 		ql_get_vp_cnt_id(ha, cmd, mode);
514 		break;
515 	case EXT_CC_VPORT_CMD_OS:
516 		ql_vp_ioctl(ha, cmd, mode);
517 		break;
518 	case EXT_CC_ACCESS_FLASH_OS:
519 		ql_access_flash(ha, cmd, mode);
520 		break;
521 	case EXT_CC_RESET_FW_OS:
522 		ql_reset_cmd(ha, cmd);
523 		break;
524 	default:
525 		/* function not supported. */
526 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
527 
528 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
529 		cmd->ResponseLen = 0;
530 		break;
531 	}
532 
533 	/* Return results to caller */
534 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
535 		EL(ha, "failed, sdm_return\n");
536 		return (EFAULT);
537 	}
538 
539 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
540 
541 	return (0);
542 }
543 
544 /*
545  * ql_sdm_setup
546  *	Make a local copy of the EXT_IOCTL struct and validate it.
547  *
548  * Input:
549  *	ha:		adapter state pointer.
550  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
551  *	arg:		Address of application EXT_IOCTL cmd data
552  *	mode:		flags
553  *	val_sig:	Pointer to a function to validate the ioctl signature.
554  *
555  * Returns:
556  *	0:		success
557  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
558  *	EINVAL:		Invalid version, signature.
559  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
560  *
561  * Context:
562  *	Kernel context.
563  */
564 static int
565 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
566     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
567 {
568 	int		rval;
569 	EXT_IOCTL	*cmd;
570 
571 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
572 
573 	/* Allocate local memory for EXT_IOCTL. */
574 	*cmd_struct = NULL;
575 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
576 	if (cmd == NULL) {
577 		EL(ha, "failed, kmem_zalloc\n");
578 		return (ENOMEM);
579 	}
580 	/* Get argument structure. */
581 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
582 	if (rval != 0) {
583 		EL(ha, "failed, ddi_copyin\n");
584 		rval = EFAULT;
585 	} else {
586 		/*
587 		 * Check signature and the version.
588 		 * If either are not valid then neither is the
589 		 * structure so don't attempt to return any error status
590 		 * because we can't trust what caller's arg points to.
591 		 * Just return the errno.
592 		 */
593 		if (val_sig(cmd) == 0) {
594 			EL(ha, "failed, signature\n");
595 			rval = EINVAL;
596 		} else if (cmd->Version > EXT_VERSION) {
597 			EL(ha, "failed, version\n");
598 			rval = EINVAL;
599 		}
600 	}
601 
602 	if (rval == 0) {
603 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
604 		*cmd_struct = cmd;
605 		cmd->Status = EXT_STATUS_OK;
606 		cmd->DetailStatus = 0;
607 	} else {
608 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
609 	}
610 
611 	return (rval);
612 }
613 
614 /*
615  * ql_validate_signature
616  *	Validate the signature string for an external ioctl call.
617  *
618  * Input:
619  *	sg:	Pointer to EXT_IOCTL signature to validate.
620  *
621  * Returns:
622  *	B_TRUE:		Signature is valid.
623  *	B_FALSE:	Signature is NOT valid.
624  *
625  * Context:
626  *	Kernel context.
627  */
628 static boolean_t
629 ql_validate_signature(EXT_IOCTL *cmd_struct)
630 {
631 	/*
632 	 * Check signature.
633 	 *
634 	 * If signature is not valid then neither is the rest of
635 	 * the structure (e.g., can't trust it), so don't attempt
636 	 * to return any error status other than the errno.
637 	 */
638 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
639 		QL_PRINT_2(CE_CONT, "failed,\n");
640 		return (B_FALSE);
641 	}
642 
643 	return (B_TRUE);
644 }
645 
646 /*
647  * ql_sdm_return
648  *	Copies return data/status to application land for
649  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
650  *
651  * Input:
652  *	ha:		adapter state pointer.
653  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
654  *	ioctl_code:	ioctl function to perform
655  *	arg:		EXT_IOCTL cmd data in application land.
656  *	mode:		flags
657  *
658  * Returns:
659  *	0:	success
660  *	EFAULT:	Copy out error.
661  *
662  * Context:
663  *	Kernel context.
664  */
665 /* ARGSUSED */
666 static int
667 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
668 {
669 	int	rval = 0;
670 
671 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
672 
673 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
674 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
675 	    mode);
676 
677 	rval |= ddi_copyout((void *)&cmd->Status,
678 	    (void *)&(((EXT_IOCTL*)arg)->Status),
679 	    sizeof (cmd->Status), mode);
680 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
681 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
682 	    sizeof (cmd->DetailStatus), mode);
683 
684 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
685 
686 	if (rval != 0) {
687 		/* Some copyout operation failed */
688 		EL(ha, "failed, ddi_copyout\n");
689 		return (EFAULT);
690 	}
691 
692 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
693 
694 	return (0);
695 }
696 
697 /*
698  * ql_query
699  *	Performs all EXT_CC_QUERY functions.
700  *
701  * Input:
702  *	ha:	adapter state pointer.
703  *	cmd:	Local EXT_IOCTL cmd struct pointer.
704  *	mode:	flags.
705  *
706  * Returns:
707  *	None, request status indicated in cmd->Status.
708  *
709  * Context:
710  *	Kernel context.
711  */
712 static void
713 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
714 {
715 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
716 	    cmd->SubCode);
717 
718 	/* case off on command subcode */
719 	switch (cmd->SubCode) {
720 	case EXT_SC_QUERY_HBA_NODE:
721 		ql_qry_hba_node(ha, cmd, mode);
722 		break;
723 	case EXT_SC_QUERY_HBA_PORT:
724 		ql_qry_hba_port(ha, cmd, mode);
725 		break;
726 	case EXT_SC_QUERY_DISC_PORT:
727 		ql_qry_disc_port(ha, cmd, mode);
728 		break;
729 	case EXT_SC_QUERY_DISC_TGT:
730 		ql_qry_disc_tgt(ha, cmd, mode);
731 		break;
732 	case EXT_SC_QUERY_DRIVER:
733 		ql_qry_driver(ha, cmd, mode);
734 		break;
735 	case EXT_SC_QUERY_FW:
736 		ql_qry_fw(ha, cmd, mode);
737 		break;
738 	case EXT_SC_QUERY_CHIP:
739 		ql_qry_chip(ha, cmd, mode);
740 		break;
741 	case EXT_SC_QUERY_CNA_PORT:
742 		ql_qry_cna_port(ha, cmd, mode);
743 		break;
744 	case EXT_SC_QUERY_ADAPTER_VERSIONS:
745 		ql_qry_adapter_versions(ha, cmd, mode);
746 		break;
747 	case EXT_SC_QUERY_DISC_LUN:
748 	default:
749 		/* function not supported. */
750 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
751 		EL(ha, "failed, Unsupported Subcode=%xh\n",
752 		    cmd->SubCode);
753 		break;
754 	}
755 
756 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
757 }
758 
759 /*
760  * ql_qry_hba_node
761  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
762  *
763  * Input:
764  *	ha:	adapter state pointer.
765  *	cmd:	EXT_IOCTL cmd struct pointer.
766  *	mode:	flags.
767  *
768  * Returns:
769  *	None, request status indicated in cmd->Status.
770  *
771  * Context:
772  *	Kernel context.
773  */
774 static void
775 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
776 {
777 	EXT_HBA_NODE	tmp_node = {0};
778 	uint_t		len;
779 	caddr_t		bufp;
780 	ql_mbx_data_t	mr;
781 
782 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
783 
784 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
785 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
786 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
787 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
788 		    "Len=%xh\n", cmd->ResponseLen);
789 		cmd->ResponseLen = 0;
790 		return;
791 	}
792 
793 	/* fill in the values */
794 
795 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
796 	    EXT_DEF_WWN_NAME_SIZE);
797 
798 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
799 
800 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
801 
802 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
803 
804 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
805 
806 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
807 		size_t		verlen;
808 		uint16_t	w;
809 		char		*tmpptr;
810 
811 		verlen = strlen((char *)(tmp_node.DriverVersion));
812 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
813 			EL(ha, "failed, No room for fpga version string\n");
814 		} else {
815 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
816 			    (uint16_t *)
817 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
818 
819 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
820 			if (tmpptr == NULL) {
821 				EL(ha, "Unable to insert fpga version str\n");
822 			} else {
823 				(void) sprintf(tmpptr, "%d.%d",
824 				    ((w & 0xf0) >> 4), (w & 0x0f));
825 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
826 			}
827 		}
828 	}
829 	(void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
830 
831 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
832 	    mr.mb[1], mr.mb[2], mr.mb[3]);
833 
834 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
835 		switch (mr.mb[6]) {
836 		case FWATTRIB_EF:
837 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
838 			break;
839 		case FWATTRIB_TP:
840 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
841 			break;
842 		case FWATTRIB_IP:
843 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
844 			break;
845 		case FWATTRIB_IPX:
846 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
847 			break;
848 		case FWATTRIB_FL:
849 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
850 			break;
851 		case FWATTRIB_FPX:
852 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
853 			break;
854 		default:
855 			break;
856 		}
857 	}
858 
859 	/* FCode version. */
860 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
861 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
862 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
863 	    (int *)&len) == DDI_PROP_SUCCESS) {
864 		if (len < EXT_DEF_MAX_STR_SIZE) {
865 			bcopy(bufp, tmp_node.OptRomVersion, len);
866 		} else {
867 			bcopy(bufp, tmp_node.OptRomVersion,
868 			    EXT_DEF_MAX_STR_SIZE - 1);
869 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
870 			    '\0';
871 		}
872 		kmem_free(bufp, len);
873 	} else {
874 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
875 	}
876 	tmp_node.PortCount = 1;
877 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
878 
879 	if (ddi_copyout((void *)&tmp_node,
880 	    (void *)(uintptr_t)(cmd->ResponseAdr),
881 	    sizeof (EXT_HBA_NODE), mode) != 0) {
882 		cmd->Status = EXT_STATUS_COPY_ERR;
883 		cmd->ResponseLen = 0;
884 		EL(ha, "failed, ddi_copyout\n");
885 	} else {
886 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
887 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
888 	}
889 }
890 
891 /*
892  * ql_qry_hba_port
893  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
894  *
895  * Input:
896  *	ha:	adapter state pointer.
897  *	cmd:	EXT_IOCTL cmd struct pointer.
898  *	mode:	flags.
899  *
900  * Returns:
901  *	None, request status indicated in cmd->Status.
902  *
903  * Context:
904  *	Kernel context.
905  */
906 static void
907 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
908 {
909 	ql_link_t	*link;
910 	ql_tgt_t	*tq;
911 	ql_mbx_data_t	mr;
912 	EXT_HBA_PORT	tmp_port = {0};
913 	int		rval;
914 	uint16_t	port_cnt, tgt_cnt, index;
915 
916 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
917 
918 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
919 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
920 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
921 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
922 		    cmd->ResponseLen);
923 		cmd->ResponseLen = 0;
924 		return;
925 	}
926 
927 	/* fill in the values */
928 
929 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
930 	    EXT_DEF_WWN_NAME_SIZE);
931 	tmp_port.Id[0] = 0;
932 	tmp_port.Id[1] = ha->d_id.b.domain;
933 	tmp_port.Id[2] = ha->d_id.b.area;
934 	tmp_port.Id[3] = ha->d_id.b.al_pa;
935 
936 	/* For now we are initiator only driver */
937 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
938 
939 	if (ha->task_daemon_flags & LOOP_DOWN) {
940 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
941 	} else if (DRIVER_SUSPENDED(ha)) {
942 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
943 	} else {
944 		tmp_port.State = EXT_DEF_HBA_OK;
945 	}
946 
947 	if (ha->flags & POINT_TO_POINT) {
948 		tmp_port.Mode = EXT_DEF_P2P_MODE;
949 	} else {
950 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
951 	}
952 	/*
953 	 * fill in the portspeed values.
954 	 *
955 	 * default to not yet negotiated state
956 	 */
957 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
958 
959 	if (tmp_port.State == EXT_DEF_HBA_OK) {
960 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
961 			mr.mb[1] = 0;
962 			mr.mb[2] = 0;
963 			rval = ql_data_rate(ha, &mr);
964 			if (rval != QL_SUCCESS) {
965 				EL(ha, "failed, data_rate=%xh\n", rval);
966 			} else {
967 				switch (mr.mb[1]) {
968 				case IIDMA_RATE_1GB:
969 					tmp_port.PortSpeed =
970 					    EXT_DEF_PORTSPEED_1GBIT;
971 					break;
972 				case IIDMA_RATE_2GB:
973 					tmp_port.PortSpeed =
974 					    EXT_DEF_PORTSPEED_2GBIT;
975 					break;
976 				case IIDMA_RATE_4GB:
977 					tmp_port.PortSpeed =
978 					    EXT_DEF_PORTSPEED_4GBIT;
979 					break;
980 				case IIDMA_RATE_8GB:
981 					tmp_port.PortSpeed =
982 					    EXT_DEF_PORTSPEED_8GBIT;
983 					break;
984 				case IIDMA_RATE_10GB:
985 					tmp_port.PortSpeed =
986 					    EXT_DEF_PORTSPEED_10GBIT;
987 					break;
988 				default:
989 					tmp_port.PortSpeed =
990 					    EXT_DEF_PORTSPEED_UNKNOWN;
991 					EL(ha, "failed, data rate=%xh\n",
992 					    mr.mb[1]);
993 					break;
994 				}
995 			}
996 		} else {
997 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
998 		}
999 	}
1000 
1001 	/* Report all supported port speeds */
1002 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
1003 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
1004 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
1005 		    EXT_DEF_PORTSPEED_1GBIT);
1006 		/*
1007 		 * Correct supported speeds based on type of
1008 		 * sfp that is present
1009 		 */
1010 		switch (ha->sfp_stat) {
1011 		case 1:
1012 			/* no sfp detected */
1013 			break;
1014 		case 2:
1015 		case 4:
1016 			/* 4GB sfp */
1017 			tmp_port.PortSupportedSpeed &=
1018 			    ~EXT_DEF_PORTSPEED_8GBIT;
1019 			break;
1020 		case 3:
1021 		case 5:
1022 			/* 8GB sfp */
1023 			tmp_port.PortSupportedSpeed &=
1024 			    ~EXT_DEF_PORTSPEED_1GBIT;
1025 			break;
1026 		default:
1027 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1028 			break;
1029 
1030 		}
1031 	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
1032 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1033 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1034 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1035 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1036 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1037 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1038 		    EXT_DEF_PORTSPEED_1GBIT);
1039 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1040 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1041 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1042 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1043 	} else {
1044 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1045 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1046 	}
1047 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1048 	port_cnt = 0;
1049 	tgt_cnt = 0;
1050 
1051 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1052 		for (link = ha->dev[index].first; link != NULL;
1053 		    link = link->next) {
1054 			tq = link->base_address;
1055 
1056 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1057 				continue;
1058 			}
1059 
1060 			port_cnt++;
1061 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1062 				tgt_cnt++;
1063 			}
1064 		}
1065 	}
1066 
1067 	tmp_port.DiscPortCount = port_cnt;
1068 	tmp_port.DiscTargetCount = tgt_cnt;
1069 
1070 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1071 
1072 	rval = ddi_copyout((void *)&tmp_port,
1073 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1074 	    sizeof (EXT_HBA_PORT), mode);
1075 	if (rval != 0) {
1076 		cmd->Status = EXT_STATUS_COPY_ERR;
1077 		cmd->ResponseLen = 0;
1078 		EL(ha, "failed, ddi_copyout\n");
1079 	} else {
1080 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1081 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1082 		    ha->instance, port_cnt, tgt_cnt);
1083 	}
1084 }
1085 
1086 /*
1087  * ql_qry_disc_port
1088  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1089  *
1090  * Input:
1091  *	ha:	adapter state pointer.
1092  *	cmd:	EXT_IOCTL cmd struct pointer.
1093  *	mode:	flags.
1094  *
1095  *	cmd->Instance = Port instance in fcport chain.
1096  *
1097  * Returns:
1098  *	None, request status indicated in cmd->Status.
1099  *
1100  * Context:
1101  *	Kernel context.
1102  */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;	/* running count of matching ports */

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The caller's buffer must hold a complete EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk the device hash buckets until the cmd->Instance'th port
	 * with a valid loop ID is reached.  The outer loop exits as soon
	 * as the inner loop leaves link non-NULL (device found).
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}

	/* 24-bit FC port ID (domain/area/al_pa); byte 0 is unused. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = tq->d_id.b.domain;
	tmp_port.Id[2] = tq->d_id.b.area;
	tmp_port.Id[3] = tq->d_id.b.al_pa;

	tmp_port.Type = 0;
	if (tq->flags & TQF_INITIATOR_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type |
		    EXT_DEF_INITIATOR_DEV);
	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		/*
		 * Device class not yet known; issue an inquiry scan.
		 * NOTE(review): tq->flags is not re-tested after the scan,
		 * so a tape device first identified here is not reported
		 * as EXT_DEF_TAPE_DEV until a later query -- confirm this
		 * is intentional.
		 */
		(void) ql_inq_scan(ha, tq, 1);
	} else if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
	}

	/* Fabric vs. direct-attach target classification. */
	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;  /* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1192 
1193 /*
1194  * ql_qry_disc_tgt
1195  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1196  *
1197  * Input:
1198  *	ha:		adapter state pointer.
1199  *	cmd:		EXT_IOCTL cmd struct pointer.
1200  *	mode:		flags.
1201  *
1202  *	cmd->Instance = Port instance in fcport chain.
1203  *
1204  * Returns:
1205  *	None, request status indicated in cmd->Status.
1206  *
1207  * Context:
1208  *	Kernel context.
1209  */
1210 static void
1211 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1212 {
1213 	EXT_DISC_TARGET	tmp_tgt = {0};
1214 	ql_link_t	*link;
1215 	ql_tgt_t	*tq;
1216 	uint16_t	index;
1217 	uint16_t	inst = 0;
1218 
1219 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1220 	    cmd->Instance);
1221 
1222 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1223 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1224 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1225 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1226 		    cmd->ResponseLen);
1227 		cmd->ResponseLen = 0;
1228 		return;
1229 	}
1230 
1231 	/* Scan port list for requested target and fill in the values */
1232 	for (link = NULL, index = 0;
1233 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1234 		for (link = ha->dev[index].first; link != NULL;
1235 		    link = link->next) {
1236 			tq = link->base_address;
1237 
1238 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1239 			    tq->flags & TQF_INITIATOR_DEVICE) {
1240 				continue;
1241 			}
1242 			if (inst != cmd->Instance) {
1243 				inst++;
1244 				continue;
1245 			}
1246 
1247 			/* fill in the values */
1248 			bcopy(tq->node_name, tmp_tgt.WWNN,
1249 			    EXT_DEF_WWN_NAME_SIZE);
1250 			bcopy(tq->port_name, tmp_tgt.WWPN,
1251 			    EXT_DEF_WWN_NAME_SIZE);
1252 
1253 			break;
1254 		}
1255 	}
1256 
1257 	if (link == NULL) {
1258 		/* no matching device */
1259 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1260 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1261 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1262 		cmd->ResponseLen = 0;
1263 		return;
1264 	}
1265 	tmp_tgt.Id[0] = 0;
1266 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1267 	tmp_tgt.Id[2] = tq->d_id.b.area;
1268 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1269 
1270 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1271 
1272 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1273 		(void) ql_inq_scan(ha, tq, 1);
1274 	}
1275 
1276 	tmp_tgt.Type = 0;
1277 	if (tq->flags & TQF_TAPE_DEVICE) {
1278 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1279 	}
1280 
1281 	if (tq->flags & TQF_FABRIC_DEVICE) {
1282 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1283 	} else {
1284 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1285 	}
1286 
1287 	tmp_tgt.Status = 0;
1288 
1289 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1290 
1291 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1292 
1293 	if (ddi_copyout((void *)&tmp_tgt,
1294 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1295 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1296 		cmd->Status = EXT_STATUS_COPY_ERR;
1297 		cmd->ResponseLen = 0;
1298 		EL(ha, "failed, ddi_copyout\n");
1299 	} else {
1300 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1301 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1302 	}
1303 }
1304 
1305 /*
1306  * ql_qry_fw
1307  *	Performs EXT_SC_QUERY_FW subfunction.
1308  *
1309  * Input:
1310  *	ha:	adapter state pointer.
1311  *	cmd:	EXT_IOCTL cmd struct pointer.
1312  *	mode:	flags.
1313  *
1314  * Returns:
1315  *	None, request status indicated in cmd->Status.
1316  *
1317  * Context:
1318  *	Kernel context.
1319  */
1320 static void
1321 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1322 {
1323 	ql_mbx_data_t	mr;
1324 	EXT_FW		fw_info = {0};
1325 
1326 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1327 
1328 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1329 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1330 		cmd->DetailStatus = sizeof (EXT_FW);
1331 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1332 		    cmd->ResponseLen);
1333 		cmd->ResponseLen = 0;
1334 		return;
1335 	}
1336 
1337 	(void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
1338 
1339 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1340 	    mr.mb[2], mr.mb[2]);
1341 
1342 	fw_info.Attrib = mr.mb[6];
1343 
1344 	if (ddi_copyout((void *)&fw_info,
1345 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1346 	    sizeof (EXT_FW), mode) != 0) {
1347 		cmd->Status = EXT_STATUS_COPY_ERR;
1348 		cmd->ResponseLen = 0;
1349 		EL(ha, "failed, ddi_copyout\n");
1350 		return;
1351 	} else {
1352 		cmd->ResponseLen = sizeof (EXT_FW);
1353 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1354 	}
1355 }
1356 
1357 /*
1358  * ql_qry_chip
1359  *	Performs EXT_SC_QUERY_CHIP subfunction.
1360  *
1361  * Input:
1362  *	ha:	adapter state pointer.
1363  *	cmd:	EXT_IOCTL cmd struct pointer.
1364  *	mode:	flags.
1365  *
1366  * Returns:
1367  *	None, request status indicated in cmd->Status.
1368  *
1369  * Context:
1370  *	Kernel context.
1371  */
1372 static void
1373 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1374 {
1375 	EXT_CHIP	chip = {0};
1376 
1377 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1378 
1379 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1380 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1381 		cmd->DetailStatus = sizeof (EXT_CHIP);
1382 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1383 		    cmd->ResponseLen);
1384 		cmd->ResponseLen = 0;
1385 		return;
1386 	}
1387 
1388 	chip.VendorId = ha->ven_id;
1389 	chip.DeviceId = ha->device_id;
1390 	chip.SubVendorId = ha->subven_id;
1391 	chip.SubSystemId = ha->subsys_id;
1392 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1393 	chip.IoAddrLen = 0x100;
1394 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1395 	chip.MemAddrLen = 0x100;
1396 	chip.ChipRevID = ha->rev_id;
1397 	if (ha->flags & FUNCTION_1) {
1398 		chip.FuncNo = 1;
1399 	}
1400 
1401 	if (ddi_copyout((void *)&chip,
1402 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1403 	    sizeof (EXT_CHIP), mode) != 0) {
1404 		cmd->Status = EXT_STATUS_COPY_ERR;
1405 		cmd->ResponseLen = 0;
1406 		EL(ha, "failed, ddi_copyout\n");
1407 	} else {
1408 		cmd->ResponseLen = sizeof (EXT_CHIP);
1409 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1410 	}
1411 }
1412 
1413 /*
1414  * ql_qry_driver
1415  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1416  *
1417  * Input:
1418  *	ha:	adapter state pointer.
1419  *	cmd:	EXT_IOCTL cmd struct pointer.
1420  *	mode:	flags.
1421  *
1422  * Returns:
1423  *	None, request status indicated in cmd->Status.
1424  *
1425  * Context:
1426  *	Kernel context.
1427  */
1428 static void
1429 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1430 {
1431 	EXT_DRIVER	qd = {0};
1432 
1433 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1434 
1435 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1436 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1437 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1438 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1439 		    cmd->ResponseLen);
1440 		cmd->ResponseLen = 0;
1441 		return;
1442 	}
1443 
1444 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1445 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1446 	qd.TargetsPerBus = (uint16_t)
1447 	    (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1448 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1449 	qd.LunsPerTarget = 2030;
1450 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1451 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1452 
1453 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1454 	    sizeof (EXT_DRIVER), mode) != 0) {
1455 		cmd->Status = EXT_STATUS_COPY_ERR;
1456 		cmd->ResponseLen = 0;
1457 		EL(ha, "failed, ddi_copyout\n");
1458 	} else {
1459 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1460 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1461 	}
1462 }
1463 
1464 /*
1465  * ql_fcct
1466  *	IOCTL management server FC-CT passthrough.
1467  *
1468  * Input:
1469  *	ha:	adapter state pointer.
1470  *	cmd:	User space CT arguments pointer.
1471  *	mode:	flags.
1472  *
1473  * Returns:
1474  *	None, request status indicated in cmd->Status.
1475  *
1476  * Context:
1477  *	Kernel context.
1478  */
1479 static void
1480 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1481 {
1482 	ql_mbx_iocb_t		*pkt;
1483 	ql_mbx_data_t		mr;
1484 	dma_mem_t		*dma_mem;
1485 	caddr_t			pld;
1486 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1487 	int			rval;
1488 	ql_ct_iu_preamble_t	*ct;
1489 	ql_xioctl_t		*xp = ha->xioctl;
1490 	ql_tgt_t		tq;
1491 	uint16_t		comp_status, loop_id;
1492 
1493 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1494 
1495 	/* Get CT argument structure. */
1496 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1497 		EL(ha, "failed, No switch\n");
1498 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1499 		cmd->ResponseLen = 0;
1500 		return;
1501 	}
1502 
1503 	if (DRIVER_SUSPENDED(ha)) {
1504 		EL(ha, "failed, LOOP_NOT_READY\n");
1505 		cmd->Status = EXT_STATUS_BUSY;
1506 		cmd->ResponseLen = 0;
1507 		return;
1508 	}
1509 
1510 	/* Login management server device. */
1511 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1512 		tq.d_id.b.al_pa = 0xfa;
1513 		tq.d_id.b.area = 0xff;
1514 		tq.d_id.b.domain = 0xff;
1515 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1516 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1517 		    MANAGEMENT_SERVER_LOOP_ID);
1518 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1519 		if (rval != QL_SUCCESS) {
1520 			EL(ha, "failed, server login\n");
1521 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1522 			cmd->ResponseLen = 0;
1523 			return;
1524 		} else {
1525 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1526 		}
1527 	}
1528 
1529 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1530 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1531 
1532 	/* Allocate a DMA Memory Descriptor */
1533 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1534 	if (dma_mem == NULL) {
1535 		EL(ha, "failed, kmem_zalloc\n");
1536 		cmd->Status = EXT_STATUS_NO_MEMORY;
1537 		cmd->ResponseLen = 0;
1538 		return;
1539 	}
1540 	/* Determine maximum buffer size. */
1541 	if (cmd->RequestLen < cmd->ResponseLen) {
1542 		pld_byte_cnt = cmd->ResponseLen;
1543 	} else {
1544 		pld_byte_cnt = cmd->RequestLen;
1545 	}
1546 
1547 	/* Allocate command block. */
1548 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1549 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1550 	if (pkt == NULL) {
1551 		EL(ha, "failed, kmem_zalloc\n");
1552 		cmd->Status = EXT_STATUS_NO_MEMORY;
1553 		cmd->ResponseLen = 0;
1554 		return;
1555 	}
1556 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1557 
1558 	/* Get command payload data. */
1559 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1560 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1561 		EL(ha, "failed, get_buffer_data\n");
1562 		kmem_free(pkt, pkt_size);
1563 		cmd->Status = EXT_STATUS_COPY_ERR;
1564 		cmd->ResponseLen = 0;
1565 		return;
1566 	}
1567 
1568 	/* Get DMA memory for the IOCB */
1569 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1570 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1571 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1572 		    "alloc failed", QL_NAME, ha->instance);
1573 		kmem_free(pkt, pkt_size);
1574 		kmem_free(dma_mem, sizeof (dma_mem_t));
1575 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1576 		cmd->ResponseLen = 0;
1577 		return;
1578 	}
1579 
1580 	/* Copy out going payload data to IOCB DMA buffer. */
1581 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1582 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1583 
1584 	/* Sync IOCB DMA buffer. */
1585 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1586 	    DDI_DMA_SYNC_FORDEV);
1587 
1588 	/*
1589 	 * Setup IOCB
1590 	 */
1591 	ct = (ql_ct_iu_preamble_t *)pld;
1592 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1593 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1594 		pkt->ms24.entry_count = 1;
1595 
1596 		/* Set loop ID */
1597 		pkt->ms24.n_port_hdl = (uint16_t)
1598 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1599 		    LE_16(SNS_24XX_HDL) :
1600 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1601 
1602 		/* Set ISP command timeout. */
1603 		pkt->ms24.timeout = LE_16(120);
1604 
1605 		/* Set cmd/response data segment counts. */
1606 		pkt->ms24.cmd_dseg_count = LE_16(1);
1607 		pkt->ms24.resp_dseg_count = LE_16(1);
1608 
1609 		/* Load ct cmd byte count. */
1610 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1611 
1612 		/* Load ct rsp byte count. */
1613 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1614 
1615 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1616 
1617 		/* Load MS command entry data segments. */
1618 		*long_ptr++ = (uint32_t)
1619 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1620 		*long_ptr++ = (uint32_t)
1621 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1622 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1623 
1624 		/* Load MS response entry data segments. */
1625 		*long_ptr++ = (uint32_t)
1626 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1627 		*long_ptr++ = (uint32_t)
1628 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1629 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1630 
1631 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1632 		    sizeof (ql_mbx_iocb_t));
1633 
1634 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1635 		if (comp_status == CS_DATA_UNDERRUN) {
1636 			if ((BE_16(ct->max_residual_size)) == 0) {
1637 				comp_status = CS_COMPLETE;
1638 			}
1639 		}
1640 
1641 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1642 		    0) {
1643 			EL(ha, "failed, I/O timeout or "
1644 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1645 			    pkt->sts24.entry_status,
1646 			    pkt->sts24.scsi_status_l, rval);
1647 			kmem_free(pkt, pkt_size);
1648 			ql_free_dma_resource(ha, dma_mem);
1649 			kmem_free(dma_mem, sizeof (dma_mem_t));
1650 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1651 			cmd->ResponseLen = 0;
1652 			return;
1653 		}
1654 	} else {
1655 		pkt->ms.entry_type = MS_TYPE;
1656 		pkt->ms.entry_count = 1;
1657 
1658 		/* Set loop ID */
1659 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1660 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1661 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1662 			pkt->ms.loop_id_l = LSB(loop_id);
1663 			pkt->ms.loop_id_h = MSB(loop_id);
1664 		} else {
1665 			pkt->ms.loop_id_h = LSB(loop_id);
1666 		}
1667 
1668 		/* Set ISP command timeout. */
1669 		pkt->ms.timeout = LE_16(120);
1670 
1671 		/* Set data segment counts. */
1672 		pkt->ms.cmd_dseg_count_l = 1;
1673 		pkt->ms.total_dseg_count = LE_16(2);
1674 
1675 		/* Response total byte count. */
1676 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1677 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1678 
1679 		/* Command total byte count. */
1680 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1681 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1682 
1683 		/* Load command/response data segments. */
1684 		pkt->ms.dseg_0_address[0] = (uint32_t)
1685 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1686 		pkt->ms.dseg_0_address[1] = (uint32_t)
1687 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1688 		pkt->ms.dseg_1_address[0] = (uint32_t)
1689 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1690 		pkt->ms.dseg_1_address[1] = (uint32_t)
1691 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1692 
1693 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1694 		    sizeof (ql_mbx_iocb_t));
1695 
1696 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1697 		if (comp_status == CS_DATA_UNDERRUN) {
1698 			if ((BE_16(ct->max_residual_size)) == 0) {
1699 				comp_status = CS_COMPLETE;
1700 			}
1701 		}
1702 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1703 			EL(ha, "failed, I/O timeout or "
1704 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1705 			kmem_free(pkt, pkt_size);
1706 			ql_free_dma_resource(ha, dma_mem);
1707 			kmem_free(dma_mem, sizeof (dma_mem_t));
1708 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1709 			cmd->ResponseLen = 0;
1710 			return;
1711 		}
1712 	}
1713 
1714 	/* Sync in coming DMA buffer. */
1715 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1716 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1717 	/* Copy in coming DMA data. */
1718 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1719 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1720 	    DDI_DEV_AUTOINCR);
1721 
1722 	/* Copy response payload from DMA buffer to application. */
1723 	if (cmd->ResponseLen != 0) {
1724 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1725 		    cmd->ResponseLen);
1726 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1727 
1728 		/* Send response payload. */
1729 		if (ql_send_buffer_data(pld,
1730 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1731 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1732 			EL(ha, "failed, send_buffer_data\n");
1733 			cmd->Status = EXT_STATUS_COPY_ERR;
1734 			cmd->ResponseLen = 0;
1735 		}
1736 	}
1737 
1738 	kmem_free(pkt, pkt_size);
1739 	ql_free_dma_resource(ha, dma_mem);
1740 	kmem_free(dma_mem, sizeof (dma_mem_t));
1741 
1742 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1743 }
1744 
1745 /*
1746  * ql_aen_reg
1747  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1748  *
1749  * Input:
1750  *	ha:	adapter state pointer.
1751  *	cmd:	EXT_IOCTL cmd struct pointer.
1752  *	mode:	flags.
1753  *
1754  * Returns:
1755  *	None, request status indicated in cmd->Status.
1756  *
1757  * Context:
1758  *	Kernel context.
1759  */
1760 static void
1761 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1762 {
1763 	EXT_REG_AEN	reg_struct;
1764 	int		rval = 0;
1765 	ql_xioctl_t	*xp = ha->xioctl;
1766 
1767 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1768 
1769 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1770 	    cmd->RequestLen, mode);
1771 
1772 	if (rval == 0) {
1773 		if (reg_struct.Enable) {
1774 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1775 		} else {
1776 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1777 			/* Empty the queue. */
1778 			INTR_LOCK(ha);
1779 			xp->aen_q_head = 0;
1780 			xp->aen_q_tail = 0;
1781 			INTR_UNLOCK(ha);
1782 		}
1783 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1784 	} else {
1785 		cmd->Status = EXT_STATUS_COPY_ERR;
1786 		EL(ha, "failed, ddi_copyin\n");
1787 	}
1788 }
1789 
1790 /*
1791  * ql_aen_get
1792  *	IOCTL management server Asynchronous Event Record Transfer.
1793  *
1794  * Input:
1795  *	ha:	adapter state pointer.
1796  *	cmd:	EXT_IOCTL cmd struct pointer.
1797  *	mode:	flags.
1798  *
1799  * Returns:
1800  *	None, request status indicated in cmd->Status.
1801  *
1802  * Context:
1803  *	Kernel context.
1804  */
1805 static void
1806 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1807 {
1808 	uint32_t	out_size;
1809 	EXT_ASYNC_EVENT	*tmp_q;
1810 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1811 	uint8_t		i;
1812 	uint8_t		queue_cnt;
1813 	uint8_t		request_cnt;
1814 	ql_xioctl_t	*xp = ha->xioctl;
1815 
1816 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1817 
1818 	/* Compute the number of events that can be returned */
1819 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1820 
1821 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1822 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1823 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1824 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1825 		    "Len=%xh\n", request_cnt);
1826 		cmd->ResponseLen = 0;
1827 		return;
1828 	}
1829 
1830 	/* 1st: Make a local copy of the entire queue content. */
1831 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1832 	queue_cnt = 0;
1833 
1834 	INTR_LOCK(ha);
1835 	i = xp->aen_q_head;
1836 
1837 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1838 		if (tmp_q[i].AsyncEventCode != 0) {
1839 			bcopy(&tmp_q[i], &aen[queue_cnt],
1840 			    sizeof (EXT_ASYNC_EVENT));
1841 			queue_cnt++;
1842 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1843 		}
1844 		if (i == xp->aen_q_tail) {
1845 			/* done. */
1846 			break;
1847 		}
1848 		i++;
1849 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1850 			i = 0;
1851 		}
1852 	}
1853 
1854 	/* Empty the queue. */
1855 	xp->aen_q_head = 0;
1856 	xp->aen_q_tail = 0;
1857 
1858 	INTR_UNLOCK(ha);
1859 
1860 	/* 2nd: Now transfer the queue content to user buffer */
1861 	/* Copy the entire queue to user's buffer. */
1862 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1863 	if (queue_cnt == 0) {
1864 		cmd->ResponseLen = 0;
1865 	} else if (ddi_copyout((void *)&aen[0],
1866 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1867 	    out_size, mode) != 0) {
1868 		cmd->Status = EXT_STATUS_COPY_ERR;
1869 		cmd->ResponseLen = 0;
1870 		EL(ha, "failed, ddi_copyout\n");
1871 	} else {
1872 		cmd->ResponseLen = out_size;
1873 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1874 	}
1875 }
1876 
1877 /*
1878  * ql_enqueue_aen
1879  *
1880  * Input:
1881  *	ha:		adapter state pointer.
1882  *	event_code:	async event code of the event to add to queue.
1883  *	payload:	event payload for the queue.
1884  *	INTR_LOCK must be already obtained.
1885  *
1886  * Context:
1887  *	Interrupt or Kernel context, no mailbox commands allowed.
1888  */
1889 void
1890 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1891 {
1892 	uint8_t			new_entry;	/* index to current entry */
1893 	uint16_t		*mbx;
1894 	EXT_ASYNC_EVENT		*aen_queue;
1895 	ql_xioctl_t		*xp = ha->xioctl;
1896 
1897 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1898 	    event_code);
1899 
1900 	if (xp == NULL) {
1901 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1902 		return;
1903 	}
1904 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1905 
1906 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1907 		/* Need to change queue pointers to make room. */
1908 
1909 		/* Increment tail for adding new entry. */
1910 		xp->aen_q_tail++;
1911 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1912 			xp->aen_q_tail = 0;
1913 		}
1914 		if (xp->aen_q_head == xp->aen_q_tail) {
1915 			/*
1916 			 * We're overwriting the oldest entry, so need to
1917 			 * update the head pointer.
1918 			 */
1919 			xp->aen_q_head++;
1920 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1921 				xp->aen_q_head = 0;
1922 			}
1923 		}
1924 	}
1925 
1926 	new_entry = xp->aen_q_tail;
1927 	aen_queue[new_entry].AsyncEventCode = event_code;
1928 
1929 	/* Update payload */
1930 	if (payload != NULL) {
1931 		switch (event_code) {
1932 		case MBA_LIP_OCCURRED:
1933 		case MBA_LOOP_UP:
1934 		case MBA_LOOP_DOWN:
1935 		case MBA_LIP_F8:
1936 		case MBA_LIP_RESET:
1937 		case MBA_PORT_UPDATE:
1938 			break;
1939 		case MBA_RSCN_UPDATE:
1940 			mbx = (uint16_t *)payload;
1941 			/* al_pa */
1942 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1943 			    LSB(mbx[2]);
1944 			/* area */
1945 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1946 			    MSB(mbx[2]);
1947 			/* domain */
1948 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1949 			    LSB(mbx[1]);
1950 			/* save in big endian */
1951 			BIG_ENDIAN_24(&aen_queue[new_entry].
1952 			    Payload.RSCN.RSCNInfo[0]);
1953 
1954 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1955 			    MSB(mbx[1]);
1956 
1957 			break;
1958 		default:
1959 			/* Not supported */
1960 			EL(ha, "failed, event code not supported=%xh\n",
1961 			    event_code);
1962 			aen_queue[new_entry].AsyncEventCode = 0;
1963 			break;
1964 		}
1965 	}
1966 
1967 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1968 }
1969 
1970 /*
1971  * ql_scsi_passthru
1972  *	IOCTL SCSI passthrough.
1973  *
1974  * Input:
1975  *	ha:	adapter state pointer.
1976  *	cmd:	User space SCSI command pointer.
1977  *	mode:	flags.
1978  *
1979  * Returns:
1980  *	None, request status indicated in cmd->Status.
1981  *
1982  * Context:
1983  *	Kernel context.
1984  */
1985 static void
1986 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1987 {
1988 	ql_mbx_iocb_t		*pkt;
1989 	ql_mbx_data_t		mr;
1990 	dma_mem_t		*dma_mem;
1991 	caddr_t			pld;
1992 	uint32_t		pkt_size, pld_size;
1993 	uint16_t		qlnt, retries, cnt, cnt2;
1994 	uint8_t			*name;
1995 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1996 	EXT_SCSI_PASSTHRU	*usp_req;
1997 	int			rval;
1998 	union _passthru {
1999 		EXT_SCSI_PASSTHRU	sp_cmd;
2000 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
2001 	} pt_req;		/* Passthru request */
2002 	uint32_t		status, sense_sz = 0;
2003 	ql_tgt_t		*tq = NULL;
2004 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
2005 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
2006 
2007 	/* SCSI request struct for SCSI passthrough IOs. */
2008 	struct {
2009 		uint16_t	lun;
2010 		uint16_t	sense_length;	/* Sense buffer size */
2011 		size_t		resid;		/* Residual */
2012 		uint8_t		*cdbp;		/* Requestor's CDB */
2013 		uint8_t		*u_sense;	/* Requestor's sense buffer */
2014 		uint8_t		cdb_len;	/* Requestor's CDB length */
2015 		uint8_t		direction;
2016 	} scsi_req;
2017 
2018 	struct {
2019 		uint8_t		*rsp_info;
2020 		uint8_t		*req_sense_data;
2021 		uint32_t	residual_length;
2022 		uint32_t	rsp_info_length;
2023 		uint32_t	req_sense_length;
2024 		uint16_t	comp_status;
2025 		uint8_t		state_flags_l;
2026 		uint8_t		state_flags_h;
2027 		uint8_t		scsi_status_l;
2028 		uint8_t		scsi_status_h;
2029 	} sts;
2030 
2031 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2032 
2033 	/* Verify Sub Code and set cnt to needed request size. */
2034 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2035 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2036 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2037 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2038 	} else {
2039 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2040 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2041 		cmd->ResponseLen = 0;
2042 		return;
2043 	}
2044 
2045 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2046 	if (dma_mem == NULL) {
2047 		EL(ha, "failed, kmem_zalloc\n");
2048 		cmd->Status = EXT_STATUS_NO_MEMORY;
2049 		cmd->ResponseLen = 0;
2050 		return;
2051 	}
2052 	/*  Verify the size of and copy in the passthru request structure. */
2053 	if (cmd->RequestLen != pld_size) {
2054 		/* Return error */
2055 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2056 		    cmd->RequestLen, pld_size);
2057 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2058 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2059 		cmd->ResponseLen = 0;
2060 		return;
2061 	}
2062 
2063 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2064 	    pld_size, mode) != 0) {
2065 		EL(ha, "failed, ddi_copyin\n");
2066 		cmd->Status = EXT_STATUS_COPY_ERR;
2067 		cmd->ResponseLen = 0;
2068 		return;
2069 	}
2070 
2071 	/*
2072 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2073 	 * request data structure.
2074 	 */
2075 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2076 		scsi_req.lun = sp_req->TargetAddr.Lun;
2077 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2078 		scsi_req.cdbp = &sp_req->Cdb[0];
2079 		scsi_req.cdb_len = sp_req->CdbLength;
2080 		scsi_req.direction = sp_req->Direction;
2081 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2082 		scsi_req.u_sense = &usp_req->SenseData[0];
2083 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2084 
2085 		qlnt = QLNT_PORT;
2086 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2087 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2088 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2089 		tq = ql_find_port(ha, name, qlnt);
2090 	} else {
2091 		/*
2092 		 * Must be FC PASSTHRU, verified above.
2093 		 */
2094 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2095 			qlnt = QLNT_PORT;
2096 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2097 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2098 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2099 			    ha->instance, cmd->SubCode, name[0], name[1],
2100 			    name[2], name[3], name[4], name[5], name[6],
2101 			    name[7]);
2102 			tq = ql_find_port(ha, name, qlnt);
2103 		} else if (fc_req->FCScsiAddr.DestType ==
2104 		    EXT_DEF_DESTTYPE_WWNN) {
2105 			qlnt = QLNT_NODE;
2106 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2107 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2108 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2109 			    ha->instance, cmd->SubCode, name[0], name[1],
2110 			    name[2], name[3], name[4], name[5], name[6],
2111 			    name[7]);
2112 			tq = ql_find_port(ha, name, qlnt);
2113 		} else if (fc_req->FCScsiAddr.DestType ==
2114 		    EXT_DEF_DESTTYPE_PORTID) {
2115 			qlnt = QLNT_PID;
2116 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2117 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2118 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2119 			    name[0], name[1], name[2]);
2120 			tq = ql_find_port(ha, name, qlnt);
2121 		} else {
2122 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2123 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2124 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2125 			cmd->ResponseLen = 0;
2126 			return;
2127 		}
2128 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2129 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2130 		scsi_req.cdbp = &sp_req->Cdb[0];
2131 		scsi_req.cdb_len = sp_req->CdbLength;
2132 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2133 		scsi_req.u_sense = &ufc_req->SenseData[0];
2134 		scsi_req.direction = fc_req->Direction;
2135 	}
2136 
2137 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2138 		EL(ha, "failed, fc_port not found\n");
2139 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2140 		cmd->ResponseLen = 0;
2141 		return;
2142 	}
2143 
2144 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2145 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2146 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2147 		cmd->ResponseLen = 0;
2148 		return;
2149 	}
2150 
2151 	/* Allocate command block. */
2152 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2153 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2154 	    cmd->ResponseLen) {
2155 		pld_size = cmd->ResponseLen;
2156 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2157 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2158 		if (pkt == NULL) {
2159 			EL(ha, "failed, kmem_zalloc\n");
2160 			cmd->Status = EXT_STATUS_NO_MEMORY;
2161 			cmd->ResponseLen = 0;
2162 			return;
2163 		}
2164 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2165 
2166 		/* Get DMA memory for the IOCB */
2167 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2168 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2169 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2170 			    "alloc failed", QL_NAME, ha->instance);
2171 			kmem_free(pkt, pkt_size);
2172 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2173 			cmd->ResponseLen = 0;
2174 			return;
2175 		}
2176 
2177 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2178 			scsi_req.direction = (uint8_t)
2179 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2180 			    CF_RD : CF_DATA_IN | CF_STAG);
2181 		} else {
2182 			scsi_req.direction = (uint8_t)
2183 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2184 			    CF_WR : CF_DATA_OUT | CF_STAG);
2185 			cmd->ResponseLen = 0;
2186 
2187 			/* Get command payload. */
2188 			if (ql_get_buffer_data(
2189 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2190 			    pld, pld_size, mode) != pld_size) {
2191 				EL(ha, "failed, get_buffer_data\n");
2192 				cmd->Status = EXT_STATUS_COPY_ERR;
2193 
2194 				kmem_free(pkt, pkt_size);
2195 				ql_free_dma_resource(ha, dma_mem);
2196 				kmem_free(dma_mem, sizeof (dma_mem_t));
2197 				return;
2198 			}
2199 
2200 			/* Copy out going data to DMA buffer. */
2201 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2202 			    (uint8_t *)dma_mem->bp, pld_size,
2203 			    DDI_DEV_AUTOINCR);
2204 
2205 			/* Sync DMA buffer. */
2206 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2207 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2208 		}
2209 	} else {
2210 		scsi_req.direction = (uint8_t)
2211 		    (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2212 		cmd->ResponseLen = 0;
2213 
2214 		pkt_size = sizeof (ql_mbx_iocb_t);
2215 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2216 		if (pkt == NULL) {
2217 			EL(ha, "failed, kmem_zalloc-2\n");
2218 			cmd->Status = EXT_STATUS_NO_MEMORY;
2219 			return;
2220 		}
2221 		pld = NULL;
2222 		pld_size = 0;
2223 	}
2224 
2225 	/* retries = ha->port_down_retry_count; */
2226 	retries = 1;
2227 	cmd->Status = EXT_STATUS_OK;
2228 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2229 
2230 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2231 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2232 
2233 	do {
2234 		if (DRIVER_SUSPENDED(ha)) {
2235 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2236 			break;
2237 		}
2238 
2239 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2240 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2241 			pkt->cmd24.entry_count = 1;
2242 
2243 			/* Set LUN number */
2244 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2245 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2246 
2247 			/* Set N_port handle */
2248 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2249 
2250 			/* Set VP Index */
2251 			pkt->cmd24.vp_index = ha->vp_index;
2252 
2253 			/* Set target ID */
2254 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2255 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2256 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2257 
2258 			/* Set ISP command timeout. */
2259 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2260 
2261 			/* Load SCSI CDB */
2262 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2263 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2264 			    DDI_DEV_AUTOINCR);
2265 			for (cnt = 0; cnt < MAX_CMDSZ;
2266 			    cnt = (uint16_t)(cnt + 4)) {
2267 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2268 				    + cnt, 4);
2269 			}
2270 
2271 			/* Set tag queue control flags */
2272 			pkt->cmd24.task = TA_STAG;
2273 
2274 			if (pld_size) {
2275 				/* Set transfer direction. */
2276 				pkt->cmd24.control_flags = scsi_req.direction;
2277 
2278 				/* Set data segment count. */
2279 				pkt->cmd24.dseg_count = LE_16(1);
2280 
2281 				/* Load total byte count. */
2282 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2283 
2284 				/* Load data descriptor. */
2285 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2286 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2287 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2288 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2289 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2290 			}
2291 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2292 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2293 			pkt->cmd3.entry_count = 1;
2294 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2295 				pkt->cmd3.target_l = LSB(tq->loop_id);
2296 				pkt->cmd3.target_h = MSB(tq->loop_id);
2297 			} else {
2298 				pkt->cmd3.target_h = LSB(tq->loop_id);
2299 			}
2300 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2301 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2302 			pkt->cmd3.control_flags_l = scsi_req.direction;
2303 			pkt->cmd3.timeout = LE_16(15);
2304 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2305 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2306 			}
2307 			if (pld_size) {
2308 				pkt->cmd3.dseg_count = LE_16(1);
2309 				pkt->cmd3.byte_count = LE_32(pld_size);
2310 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2311 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2312 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2313 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2314 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2315 			}
2316 		} else {
2317 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2318 			pkt->cmd.entry_count = 1;
2319 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2320 				pkt->cmd.target_l = LSB(tq->loop_id);
2321 				pkt->cmd.target_h = MSB(tq->loop_id);
2322 			} else {
2323 				pkt->cmd.target_h = LSB(tq->loop_id);
2324 			}
2325 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2326 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2327 			pkt->cmd.control_flags_l = scsi_req.direction;
2328 			pkt->cmd.timeout = LE_16(15);
2329 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2330 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2331 			}
2332 			if (pld_size) {
2333 				pkt->cmd.dseg_count = LE_16(1);
2334 				pkt->cmd.byte_count = LE_32(pld_size);
2335 				pkt->cmd.dseg_0_address = (uint32_t)
2336 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2337 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2338 			}
2339 		}
2340 		/* Go issue command and wait for completion. */
2341 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2342 		QL_DUMP_9(pkt, 8, pkt_size);
2343 
2344 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2345 
2346 		if (pld_size) {
2347 			/* Sync in coming DMA buffer. */
2348 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2349 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2350 			/* Copy in coming DMA data. */
2351 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2352 			    (uint8_t *)dma_mem->bp, pld_size,
2353 			    DDI_DEV_AUTOINCR);
2354 		}
2355 
2356 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2357 			pkt->sts24.entry_status = (uint8_t)
2358 			    (pkt->sts24.entry_status & 0x3c);
2359 		} else {
2360 			pkt->sts.entry_status = (uint8_t)
2361 			    (pkt->sts.entry_status & 0x7e);
2362 		}
2363 
2364 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2365 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2366 			    pkt->sts.entry_status, tq->d_id.b24);
2367 			status = QL_FUNCTION_PARAMETER_ERROR;
2368 		}
2369 
2370 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2371 		    LE_16(pkt->sts24.comp_status) :
2372 		    LE_16(pkt->sts.comp_status));
2373 
2374 		/*
2375 		 * We have verified about all the request that can be so far.
2376 		 * Now we need to start verification of our ability to
2377 		 * actually issue the CDB.
2378 		 */
2379 		if (DRIVER_SUSPENDED(ha)) {
2380 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2381 			break;
2382 		} else if (status == QL_SUCCESS &&
2383 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2384 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2385 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2386 			if (tq->flags & TQF_FABRIC_DEVICE) {
2387 				rval = ql_login_fport(ha, tq, tq->loop_id,
2388 				    LFF_NO_PLOGI, &mr);
2389 				if (rval != QL_SUCCESS) {
2390 					EL(ha, "failed, login_fport=%xh, "
2391 					    "d_id=%xh\n", rval, tq->d_id.b24);
2392 				}
2393 			} else {
2394 				rval = ql_login_lport(ha, tq, tq->loop_id,
2395 				    LLF_NONE);
2396 				if (rval != QL_SUCCESS) {
2397 					EL(ha, "failed, login_lport=%xh, "
2398 					    "d_id=%xh\n", rval, tq->d_id.b24);
2399 				}
2400 			}
2401 		} else {
2402 			break;
2403 		}
2404 
2405 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2406 
2407 	} while (retries--);
2408 
2409 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2410 		/* Cannot issue command now, maybe later */
2411 		EL(ha, "failed, suspended\n");
2412 		kmem_free(pkt, pkt_size);
2413 		ql_free_dma_resource(ha, dma_mem);
2414 		kmem_free(dma_mem, sizeof (dma_mem_t));
2415 		cmd->Status = EXT_STATUS_SUSPENDED;
2416 		cmd->ResponseLen = 0;
2417 		return;
2418 	}
2419 
2420 	if (status != QL_SUCCESS) {
2421 		/* Command error */
2422 		EL(ha, "failed, I/O\n");
2423 		kmem_free(pkt, pkt_size);
2424 		ql_free_dma_resource(ha, dma_mem);
2425 		kmem_free(dma_mem, sizeof (dma_mem_t));
2426 		cmd->Status = EXT_STATUS_ERR;
2427 		cmd->DetailStatus = status;
2428 		cmd->ResponseLen = 0;
2429 		return;
2430 	}
2431 
2432 	/* Setup status. */
2433 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
2434 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2435 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2436 
2437 		/* Setup residuals. */
2438 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2439 
2440 		/* Setup state flags. */
2441 		sts.state_flags_l = pkt->sts24.state_flags_l;
2442 		sts.state_flags_h = pkt->sts24.state_flags_h;
2443 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2444 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2445 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2446 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2447 		} else {
2448 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2449 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2450 			    SF_GOT_STATUS);
2451 		}
2452 		if (scsi_req.direction & CF_WR) {
2453 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2454 			    SF_DATA_OUT);
2455 		} else if (scsi_req.direction & CF_RD) {
2456 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2457 			    SF_DATA_IN);
2458 		}
2459 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2460 
2461 		/* Setup FCP response info. */
2462 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2463 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2464 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2465 		for (cnt = 0; cnt < sts.rsp_info_length;
2466 		    cnt = (uint16_t)(cnt + 4)) {
2467 			ql_chg_endian(sts.rsp_info + cnt, 4);
2468 		}
2469 
2470 		/* Setup sense data. */
2471 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2472 			sts.req_sense_length =
2473 			    LE_32(pkt->sts24.fcp_sense_length);
2474 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2475 			    SF_ARQ_DONE);
2476 		} else {
2477 			sts.req_sense_length = 0;
2478 		}
2479 		sts.req_sense_data =
2480 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2481 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2482 		    (uintptr_t)sts.req_sense_data);
2483 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2484 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2485 		}
2486 	} else {
2487 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2488 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2489 
2490 		/* Setup residuals. */
2491 		sts.residual_length = LE_32(pkt->sts.residual_length);
2492 
2493 		/* Setup state flags. */
2494 		sts.state_flags_l = pkt->sts.state_flags_l;
2495 		sts.state_flags_h = pkt->sts.state_flags_h;
2496 
2497 		/* Setup FCP response info. */
2498 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2499 		    LE_16(pkt->sts.rsp_info_length) : 0;
2500 		sts.rsp_info = &pkt->sts.rsp_info[0];
2501 
2502 		/* Setup sense data. */
2503 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2504 		    LE_16(pkt->sts.req_sense_length) : 0;
2505 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2506 	}
2507 
2508 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2509 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2510 
2511 	switch (sts.comp_status) {
2512 	case CS_INCOMPLETE:
2513 	case CS_ABORTED:
2514 	case CS_DEVICE_UNAVAILABLE:
2515 	case CS_PORT_UNAVAILABLE:
2516 	case CS_PORT_LOGGED_OUT:
2517 	case CS_PORT_CONFIG_CHG:
2518 	case CS_PORT_BUSY:
2519 	case CS_LOOP_DOWN_ABORT:
2520 		cmd->Status = EXT_STATUS_BUSY;
2521 		break;
2522 	case CS_RESET:
2523 	case CS_QUEUE_FULL:
2524 		cmd->Status = EXT_STATUS_ERR;
2525 		break;
2526 	case CS_TIMEOUT:
2527 		cmd->Status = EXT_STATUS_ERR;
2528 		break;
2529 	case CS_DATA_OVERRUN:
2530 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2531 		break;
2532 	case CS_DATA_UNDERRUN:
2533 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2534 		break;
2535 	}
2536 
2537 	/*
2538 	 * If non data transfer commands fix tranfer counts.
2539 	 */
2540 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2541 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2542 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2543 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2544 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2545 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2546 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2547 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2548 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2549 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2550 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2551 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2552 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2553 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2554 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2555 		/*
2556 		 * Non data transfer command, clear sts_entry residual
2557 		 * length.
2558 		 */
2559 		sts.residual_length = 0;
2560 		cmd->ResponseLen = 0;
2561 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2562 			sts.comp_status = CS_COMPLETE;
2563 			cmd->Status = EXT_STATUS_OK;
2564 		}
2565 	} else {
2566 		cmd->ResponseLen = pld_size;
2567 	}
2568 
2569 	/* Correct ISP completion status */
2570 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2571 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2572 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2573 		    ha->instance);
2574 		scsi_req.resid = 0;
2575 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2576 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2577 		    ha->instance);
2578 		scsi_req.resid = sts.residual_length;
2579 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2580 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2581 
2582 			cmd->ResponseLen = (uint32_t)
2583 			    (pld_size - scsi_req.resid);
2584 		} else {
2585 			EL(ha, "failed, Transfer ERROR\n");
2586 			cmd->Status = EXT_STATUS_ERR;
2587 			cmd->ResponseLen = 0;
2588 		}
2589 	} else {
2590 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2591 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2592 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2593 		    sts.scsi_status_l);
2594 
2595 		scsi_req.resid = pld_size;
2596 		/*
2597 		 * Handle residual count on SCSI check
2598 		 * condition.
2599 		 *
2600 		 * - If Residual Under / Over is set, use the
2601 		 *   Residual Transfer Length field in IOCB.
2602 		 * - If Residual Under / Over is not set, and
2603 		 *   Transferred Data bit is set in State Flags
2604 		 *   field of IOCB, report residual value of 0
2605 		 *   (you may want to do this for tape
2606 		 *   Write-type commands only). This takes care
2607 		 *   of logical end of tape problem and does
2608 		 *   not break Unit Attention.
2609 		 * - If Residual Under / Over is not set, and
2610 		 *   Transferred Data bit is not set in State
2611 		 *   Flags, report residual value equal to
2612 		 *   original data transfer length.
2613 		 */
2614 		if (sts.scsi_status_l & STATUS_CHECK) {
2615 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2616 			cmd->DetailStatus = sts.scsi_status_l;
2617 			if (sts.scsi_status_h &
2618 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2619 				scsi_req.resid = sts.residual_length;
2620 			} else if (sts.state_flags_h &
2621 			    STATE_XFERRED_DATA) {
2622 				scsi_req.resid = 0;
2623 			}
2624 		}
2625 	}
2626 
2627 	if (sts.scsi_status_l & STATUS_CHECK &&
2628 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2629 	    sts.req_sense_length) {
2630 		/*
2631 		 * Check condition with vaild sense data flag set and sense
2632 		 * length != 0
2633 		 */
2634 		if (sts.req_sense_length > scsi_req.sense_length) {
2635 			sense_sz = scsi_req.sense_length;
2636 		} else {
2637 			sense_sz = sts.req_sense_length;
2638 		}
2639 
2640 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2641 		    tq->d_id.b24);
2642 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2643 
2644 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2645 		    (size_t)sense_sz, mode) != 0) {
2646 			EL(ha, "failed, request sense ddi_copyout\n");
2647 		}
2648 
2649 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2650 		cmd->DetailStatus = sts.scsi_status_l;
2651 	}
2652 
2653 	/* Copy response payload from DMA buffer to application. */
2654 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2655 	    cmd->ResponseLen != 0) {
2656 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2657 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2658 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2659 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2660 
2661 		/* Send response payload. */
2662 		if (ql_send_buffer_data(pld,
2663 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2664 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2665 			EL(ha, "failed, send_buffer_data\n");
2666 			cmd->Status = EXT_STATUS_COPY_ERR;
2667 			cmd->ResponseLen = 0;
2668 		}
2669 	}
2670 
2671 	if (cmd->Status != EXT_STATUS_OK) {
2672 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2673 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2674 	} else {
2675 		/*EMPTY*/
2676 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2677 		    ha->instance, cmd->ResponseLen);
2678 	}
2679 
2680 	kmem_free(pkt, pkt_size);
2681 	ql_free_dma_resource(ha, dma_mem);
2682 	kmem_free(dma_mem, sizeof (dma_mem_t));
2683 }
2684 
/*
 * ql_wwpn_to_scsiaddr
 *	Converts a world wide port name to a SCSI target address.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
2696 static void
2697 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2698 {
2699 	int		status;
2700 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2701 	EXT_SCSI_ADDR	*tmp_addr;
2702 	ql_tgt_t	*tq;
2703 
2704 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2705 
2706 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2707 		/* Return error */
2708 		EL(ha, "incorrect RequestLen\n");
2709 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2710 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2711 		return;
2712 	}
2713 
2714 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2715 	    cmd->RequestLen, mode);
2716 
2717 	if (status != 0) {
2718 		cmd->Status = EXT_STATUS_COPY_ERR;
2719 		EL(ha, "failed, ddi_copyin\n");
2720 		return;
2721 	}
2722 
2723 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2724 
2725 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2726 		/* no matching device */
2727 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2728 		EL(ha, "failed, device not found\n");
2729 		return;
2730 	}
2731 
2732 	/* Copy out the IDs found.  For now we can only return target ID. */
2733 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2734 
2735 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2736 
2737 	if (status != 0) {
2738 		cmd->Status = EXT_STATUS_COPY_ERR;
2739 		EL(ha, "failed, ddi_copyout\n");
2740 	} else {
2741 		cmd->Status = EXT_STATUS_OK;
2742 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2743 	}
2744 }
2745 
2746 /*
2747  * ql_host_idx
2748  *	Gets host order index.
2749  *
2750  * Input:
2751  *	ha:	adapter state pointer.
2752  *	cmd:	EXT_IOCTL cmd struct pointer.
2753  *	mode:	flags.
2754  *
2755  * Returns:
2756  *	None, request status indicated in cmd->Status.
2757  *
2758  * Context:
2759  *	Kernel context.
2760  */
2761 static void
2762 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2763 {
2764 	uint16_t	idx;
2765 
2766 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2767 
2768 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2769 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2770 		cmd->DetailStatus = sizeof (uint16_t);
2771 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2772 		cmd->ResponseLen = 0;
2773 		return;
2774 	}
2775 
2776 	idx = (uint16_t)ha->instance;
2777 
2778 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2779 	    sizeof (uint16_t), mode) != 0) {
2780 		cmd->Status = EXT_STATUS_COPY_ERR;
2781 		cmd->ResponseLen = 0;
2782 		EL(ha, "failed, ddi_copyout\n");
2783 	} else {
2784 		cmd->ResponseLen = sizeof (uint16_t);
2785 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2786 	}
2787 }
2788 
2789 /*
2790  * ql_host_drvname
2791  *	Gets host driver name
2792  *
2793  * Input:
2794  *	ha:	adapter state pointer.
2795  *	cmd:	EXT_IOCTL cmd struct pointer.
2796  *	mode:	flags.
2797  *
2798  * Returns:
2799  *	None, request status indicated in cmd->Status.
2800  *
2801  * Context:
2802  *	Kernel context.
2803  */
2804 static void
2805 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2806 {
2807 
2808 	char		drvname[] = QL_NAME;
2809 	uint32_t	qlnamelen;
2810 
2811 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2812 
2813 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2814 
2815 	if (cmd->ResponseLen < qlnamelen) {
2816 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2817 		cmd->DetailStatus = qlnamelen;
2818 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2819 		    cmd->ResponseLen, qlnamelen);
2820 		cmd->ResponseLen = 0;
2821 		return;
2822 	}
2823 
2824 	if (ddi_copyout((void *)&drvname,
2825 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2826 	    qlnamelen, mode) != 0) {
2827 		cmd->Status = EXT_STATUS_COPY_ERR;
2828 		cmd->ResponseLen = 0;
2829 		EL(ha, "failed, ddi_copyout\n");
2830 	} else {
2831 		cmd->ResponseLen = qlnamelen-1;
2832 	}
2833 
2834 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2835 }
2836 
2837 /*
2838  * ql_read_nvram
2839  *	Get NVRAM contents.
2840  *
2841  * Input:
2842  *	ha:	adapter state pointer.
2843  *	cmd:	EXT_IOCTL cmd struct pointer.
2844  *	mode:	flags.
2845  *
2846  * Returns:
2847  *	None, request status indicated in cmd->Status.
2848  *
2849  * Context:
2850  *	Kernel context.
2851  */
2852 static void
2853 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2854 {
2855 
2856 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2857 
2858 	if (cmd->ResponseLen < ha->nvram_cache->size) {
2859 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2860 		cmd->DetailStatus = ha->nvram_cache->size;
2861 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2862 		    cmd->ResponseLen);
2863 		cmd->ResponseLen = 0;
2864 		return;
2865 	}
2866 
2867 	/* Get NVRAM data. */
2868 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2869 	    mode) != 0) {
2870 		cmd->Status = EXT_STATUS_COPY_ERR;
2871 		cmd->ResponseLen = 0;
2872 		EL(ha, "failed, copy error\n");
2873 	} else {
2874 		cmd->ResponseLen = ha->nvram_cache->size;
2875 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2876 	}
2877 }
2878 
2879 /*
2880  * ql_write_nvram
2881  *	Loads NVRAM contents.
2882  *
2883  * Input:
2884  *	ha:	adapter state pointer.
2885  *	cmd:	EXT_IOCTL cmd struct pointer.
2886  *	mode:	flags.
2887  *
2888  * Returns:
2889  *	None, request status indicated in cmd->Status.
2890  *
2891  * Context:
2892  *	Kernel context.
2893  */
2894 static void
2895 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2896 {
2897 
2898 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2899 
2900 	if (cmd->RequestLen < ha->nvram_cache->size) {
2901 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2902 		cmd->DetailStatus = ha->nvram_cache->size;
2903 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2904 		    cmd->RequestLen);
2905 		return;
2906 	}
2907 
2908 	/* Load NVRAM data. */
2909 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2910 	    mode) != 0) {
2911 		cmd->Status = EXT_STATUS_COPY_ERR;
2912 		EL(ha, "failed, copy error\n");
2913 	} else {
2914 		/*EMPTY*/
2915 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2916 	}
2917 }
2918 
2919 /*
2920  * ql_write_vpd
2921  *	Loads VPD contents.
2922  *
2923  * Input:
2924  *	ha:	adapter state pointer.
2925  *	cmd:	EXT_IOCTL cmd struct pointer.
2926  *	mode:	flags.
2927  *
2928  * Returns:
2929  *	None, request status indicated in cmd->Status.
2930  *
2931  * Context:
2932  *	Kernel context.
2933  */
2934 static void
2935 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2936 {
2937 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2938 
2939 	int32_t		rval = 0;
2940 
2941 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2942 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2943 		EL(ha, "failed, invalid request for HBA\n");
2944 		return;
2945 	}
2946 
2947 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2948 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2949 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2950 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2951 		    cmd->RequestLen);
2952 		return;
2953 	}
2954 
2955 	/* Load VPD data. */
2956 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2957 	    mode)) != 0) {
2958 		cmd->Status = EXT_STATUS_COPY_ERR;
2959 		cmd->DetailStatus = rval;
2960 		EL(ha, "failed, errno=%x\n", rval);
2961 	} else {
2962 		/*EMPTY*/
2963 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2964 	}
2965 }
2966 
2967 /*
2968  * ql_read_vpd
2969  *	Dumps VPD contents.
2970  *
2971  * Input:
2972  *	ha:	adapter state pointer.
2973  *	cmd:	EXT_IOCTL cmd struct pointer.
2974  *	mode:	flags.
2975  *
2976  * Returns:
2977  *	None, request status indicated in cmd->Status.
2978  *
2979  * Context:
2980  *	Kernel context.
2981  */
2982 static void
2983 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2984 {
2985 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2986 
2987 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2988 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2989 		EL(ha, "failed, invalid request for HBA\n");
2990 		return;
2991 	}
2992 
2993 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2994 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2995 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2996 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2997 		    cmd->ResponseLen);
2998 		return;
2999 	}
3000 
3001 	/* Dump VPD data. */
3002 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
3003 	    mode)) != 0) {
3004 		cmd->Status = EXT_STATUS_COPY_ERR;
3005 		EL(ha, "failed,\n");
3006 	} else {
3007 		/*EMPTY*/
3008 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3009 	}
3010 }
3011 
3012 /*
3013  * ql_get_fcache
3014  *	Dumps flash cache contents.
3015  *
3016  * Input:
3017  *	ha:	adapter state pointer.
3018  *	cmd:	EXT_IOCTL cmd struct pointer.
3019  *	mode:	flags.
3020  *
3021  * Returns:
3022  *	None, request status indicated in cmd->Status.
3023  *
3024  * Context:
3025  *	Kernel context.
3026  */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Hold the cache lock across the whole walk of ha->fcache. */
	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response buffer size: the legacy layout uses fixed
	 * 100-byte slots; pre-24xx parts expose one slot, 24xx and
	 * later expose four (BIOS/FCODE/EFI at 0/100/200, FW at 300).
	 */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/* Reuse bsize as the running count of bytes actually copied. */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* At most 100 bytes of each image fit in a slot. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, done\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance by the fixed slot size, not cpsize. */
			boff += 100;
			bsize += cpsize;
			/* Clear the type bit so each type is taken once. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Copy up to 100 bytes of FW, minus the PCI header. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, done\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): the full slot size (100) is added here
		 * rather than cpsize, unlike the loop above -- presumably
		 * intentional for the legacy layout; confirm.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* DetailStatus reports the number of bytes returned. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3126 
3127 /*
3128  * ql_get_fcache_ex
3129  *	Dumps flash cache contents.
3130  *
3131  * Input:
3132  *	ha:	adapter state pointer.
3133  *	cmd:	EXT_IOCTL cmd struct pointer.
3134  *	mode:	flags.
3135  *
3136  * Returns:
3137  *	None, request status indicated in cmd->Status.
3138  *
3139  * Context:
3140  *	Kernel context.
3141  */
3142 static void
3143 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3144 {
3145 	uint32_t	bsize = 0;
3146 	uint32_t	boff = 0;
3147 	ql_fcache_t	*fptr;
3148 
3149 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3150 
3151 	CACHE_LOCK(ha);
3152 	if (ha->fcache == NULL) {
3153 		CACHE_UNLOCK(ha);
3154 		cmd->Status = EXT_STATUS_ERR;
3155 		EL(ha, "failed, adapter fcache not setup\n");
3156 		return;
3157 	}
3158 
3159 	/* Make sure user passed enough buffer space */
3160 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3161 		bsize += FBUFSIZE;
3162 	}
3163 
3164 	if (cmd->ResponseLen < bsize) {
3165 		CACHE_UNLOCK(ha);
3166 		if (cmd->ResponseLen != 0) {
3167 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3168 			    bsize, cmd->ResponseLen);
3169 		}
3170 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3171 		cmd->DetailStatus = bsize;
3172 		return;
3173 	}
3174 
3175 	boff = 0;
3176 	fptr = ha->fcache;
3177 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3178 		/* Get the next image */
3179 		if (ddi_copyout(fptr->buf,
3180 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3181 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3182 		    mode) != 0) {
3183 			CACHE_UNLOCK(ha);
3184 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3185 			cmd->Status = EXT_STATUS_COPY_ERR;
3186 			cmd->DetailStatus = 0;
3187 			return;
3188 		}
3189 		boff += FBUFSIZE;
3190 		fptr = fptr->next;
3191 	}
3192 
3193 	CACHE_UNLOCK(ha);
3194 	cmd->Status = EXT_STATUS_OK;
3195 	cmd->DetailStatus = bsize;
3196 
3197 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3198 }
3199 
3200 /*
3201  * ql_read_flash
3202  *	Get flash contents.
3203  *
3204  * Input:
3205  *	ha:	adapter state pointer.
3206  *	cmd:	EXT_IOCTL cmd struct pointer.
3207  *	mode:	flags.
3208  *
3209  * Returns:
3210  *	None, request status indicated in cmd->Status.
3211  *
3212  * Context:
3213  *	Kernel context.
3214  */
3215 static void
3216 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3217 {
3218 	ql_xioctl_t	*xp = ha->xioctl;
3219 
3220 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3221 
3222 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3223 		EL(ha, "ql_stall_driver failed\n");
3224 		cmd->Status = EXT_STATUS_BUSY;
3225 		cmd->DetailStatus = xp->fdesc.flash_size;
3226 		cmd->ResponseLen = 0;
3227 		return;
3228 	}
3229 
3230 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3231 		cmd->Status = EXT_STATUS_ERR;
3232 		cmd->DetailStatus = xp->fdesc.flash_size;
3233 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3234 		    cmd->ResponseLen, xp->fdesc.flash_size);
3235 		cmd->ResponseLen = 0;
3236 	} else {
3237 		/* adjust read size to flash size */
3238 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3239 			EL(ha, "adjusting req=%xh, max=%xh\n",
3240 			    cmd->ResponseLen, xp->fdesc.flash_size);
3241 			cmd->ResponseLen = xp->fdesc.flash_size;
3242 		}
3243 
3244 		/* Get flash data. */
3245 		if (ql_flash_fcode_dump(ha,
3246 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3247 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3248 			cmd->Status = EXT_STATUS_COPY_ERR;
3249 			cmd->ResponseLen = 0;
3250 			EL(ha, "failed,\n");
3251 		}
3252 	}
3253 
3254 	/* Resume I/O */
3255 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
3256 		ql_restart_driver(ha);
3257 	} else {
3258 		EL(ha, "isp_abort_needed for restart\n");
3259 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3260 		    DRIVER_STALL);
3261 	}
3262 
3263 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3264 }
3265 
3266 /*
3267  * ql_write_flash
3268  *	Loads flash contents.
3269  *
3270  * Input:
3271  *	ha:	adapter state pointer.
3272  *	cmd:	EXT_IOCTL cmd struct pointer.
3273  *	mode:	flags.
3274  *
3275  * Returns:
3276  *	None, request status indicated in cmd->Status.
3277  *
3278  * Context:
3279  *	Kernel context.
3280  */
3281 static void
3282 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3283 {
3284 	ql_xioctl_t	*xp = ha->xioctl;
3285 
3286 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3287 
3288 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3289 		EL(ha, "ql_stall_driver failed\n");
3290 		cmd->Status = EXT_STATUS_BUSY;
3291 		cmd->DetailStatus = xp->fdesc.flash_size;
3292 		cmd->ResponseLen = 0;
3293 		return;
3294 	}
3295 
3296 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3297 		cmd->Status = EXT_STATUS_ERR;
3298 		cmd->DetailStatus = xp->fdesc.flash_size;
3299 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3300 		    cmd->RequestLen, xp->fdesc.flash_size);
3301 		cmd->ResponseLen = 0;
3302 	} else {
3303 		/* Load flash data. */
3304 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3305 			cmd->Status = EXT_STATUS_ERR;
3306 			cmd->DetailStatus =  xp->fdesc.flash_size;
3307 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3308 			    cmd->RequestLen, xp->fdesc.flash_size);
3309 		} else if (ql_flash_fcode_load(ha,
3310 		    (void *)(uintptr_t)(cmd->RequestAdr),
3311 		    (size_t)(cmd->RequestLen), mode) != 0) {
3312 			cmd->Status = EXT_STATUS_COPY_ERR;
3313 			EL(ha, "failed,\n");
3314 		}
3315 	}
3316 
3317 	/* Resume I/O */
3318 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
3319 		ql_restart_driver(ha);
3320 	} else {
3321 		EL(ha, "isp_abort_needed for restart\n");
3322 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3323 		    DRIVER_STALL);
3324 	}
3325 
3326 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3327 }
3328 
3329 /*
3330  * ql_diagnostic_loopback
3331  *	Performs EXT_CC_LOOPBACK Command
3332  *
3333  * Input:
3334  *	ha:	adapter state pointer.
3335  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3336  *	mode:	flags.
3337  *
3338  * Returns:
3339  *	None, request status indicated in cmd->Status.
3340  *
3341  * Context:
3342  *	Kernel context.
3343  */
3344 static void
3345 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3346 {
3347 	EXT_LOOPBACK_REQ	plbreq;
3348 	EXT_LOOPBACK_RSP	plbrsp;
3349 	ql_mbx_data_t		mr;
3350 	uint32_t		rval, buffer_size, tc;
3351 	caddr_t			bp, data, pay_load;
3352 
3353 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3354 
3355 	/* Get loop back request. */
3356 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3357 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3358 		EL(ha, "failed, ddi_copyin\n");
3359 		cmd->Status = EXT_STATUS_COPY_ERR;
3360 		cmd->ResponseLen = 0;
3361 		return;
3362 	}
3363 
3364 	/* Check transfer length fits in buffer. */
3365 	if (plbreq.BufferLength < plbreq.TransferCount &&
3366 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3367 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3368 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3369 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3370 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3371 		cmd->ResponseLen = 0;
3372 		return;
3373 	}
3374 
3375 	/*
3376 	 * If this port type is F_PORT then we send an ECHO command rather
3377 	 * than loopback. This requires an els echo cmd header in the first
3378 	 * four bytes of the payload. In either case, ECHO or LOOPBACK, the
3379 	 * operation is limited to the maximum frame size.
3380 	 */
3381 	buffer_size = (uint32_t)QL_MAX_FRAME_SIZE(ha);
3382 
3383 	/* Allocate command/payload memory. */
3384 	bp = kmem_zalloc(buffer_size, KM_SLEEP);
3385 	if (bp == NULL) {
3386 		EL(ha, "failed, kmem_zalloc\n");
3387 		cmd->Status = EXT_STATUS_NO_MEMORY;
3388 		cmd->ResponseLen = 0;
3389 		return;
3390 	}
3391 
3392 	/* Put data in buffer leaving room for ELS cmd hdr */
3393 	data = bp;
3394 	data += 4;
3395 
3396 	/* Get loopback data. */
3397 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3398 	    data, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3399 		EL(ha, "failed, ddi_copyin-2\n");
3400 		kmem_free(bp, buffer_size);
3401 		cmd->Status = EXT_STATUS_COPY_ERR;
3402 		cmd->ResponseLen = 0;
3403 		return;
3404 	}
3405 
3406 	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
3407 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3408 		EL(ha, "failed, LOOP_NOT_READY\n");
3409 		kmem_free(bp, buffer_size);
3410 		cmd->Status = EXT_STATUS_BUSY;
3411 		cmd->ResponseLen = 0;
3412 		return;
3413 	}
3414 
3415 	/* Shutdown IP. */
3416 	if (ha->flags & IP_INITIALIZED) {
3417 		(void) ql_shutdown_ip(ha);
3418 	}
3419 
3420 	/* determine topology so we can send the loopback or the echo */
3421 	/* Echo is supported on 2300's only and above */
3422 
3423 	if (!(ha->task_daemon_flags & LOOP_DOWN) &&
3424 	    (ha->topology & QL_F_PORT) &&
3425 	    ha->device_id >= 0x2300) {
3426 		QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using echo\n",
3427 		    ha->instance);
3428 
3429 		if (CFG_IST(ha, CFG_CTRL_8081)) {
3430 			uint32_t echo_cmd;
3431 
3432 			if (plbreq.TransferCount > 252) {
3433 				EL(ha, "failed, echo xfercnt=%d\n",
3434 				    plbreq.TransferCount);
3435 				cmd->Status = EXT_STATUS_INVALID_PARAM;
3436 				cmd->ResponseLen = 0;
3437 				kmem_free(bp, buffer_size);
3438 				return;
3439 			}
3440 
3441 			/* Setup echo cmd & adjust for platform. */
3442 			/* peek at the data looking for ELS echo cmd. */
3443 			echo_cmd = *(uint32_t *)data;
3444 			BIG_ENDIAN_32(&echo_cmd);
3445 
3446 			if (echo_cmd != QL_ECHO_CMD) {
3447 				echo_cmd = QL_ECHO_CMD;
3448 				BIG_ENDIAN_32(&echo_cmd);
3449 				*(uint32_t *)bp = echo_cmd;
3450 				pay_load = bp;
3451 				tc = plbreq.TransferCount + 4;
3452 			} else {
3453 				pay_load = data;
3454 				tc = plbreq.TransferCount;
3455 			}
3456 		}
3457 
3458 		plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3459 		/* ELS ECHO cmd plus the data. */
3460 		rval = ql_diag_echo(ha, 0, pay_load, tc,
3461 		    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ? BIT_15 : BIT_6),
3462 		    &mr);
3463 	} else {
3464 		plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3465 		/* just the data */
3466 		rval = ql_diag_loopback(ha, 0, data, plbreq.TransferCount,
3467 		    plbreq.Options, plbreq.IterationCount, &mr);
3468 	}
3469 
3470 	ql_restart_driver(ha);
3471 
3472 	/* Restart IP if it was shutdown. */
3473 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3474 		(void) ql_initialize_ip(ha);
3475 		ql_isp_rcvbuf(ha);
3476 	}
3477 
3478 	if (rval != QL_SUCCESS) {
3479 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3480 		kmem_free(bp, buffer_size);
3481 		cmd->Status = EXT_STATUS_MAILBOX;
3482 		cmd->DetailStatus = rval;
3483 		cmd->ResponseLen = 0;
3484 		return;
3485 	}
3486 
3487 	/* Return loopback data. */
3488 	if (ql_send_buffer_data(data, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3489 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3490 		EL(ha, "failed, ddi_copyout\n");
3491 		kmem_free(bp, buffer_size);
3492 		cmd->Status = EXT_STATUS_COPY_ERR;
3493 		cmd->ResponseLen = 0;
3494 		return;
3495 	}
3496 	kmem_free(bp, buffer_size);
3497 
3498 	/* Return loopback results. */
3499 	plbrsp.BufferAddress = plbreq.BufferAddress;
3500 	plbrsp.BufferLength = plbreq.TransferCount;
3501 	plbrsp.CompletionStatus = mr.mb[0];
3502 
3503 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3504 		plbrsp.CrcErrorCount = 0;
3505 		plbrsp.DisparityErrorCount = 0;
3506 		plbrsp.FrameLengthErrorCount = 0;
3507 		plbrsp.IterationCountLastError = 0;
3508 	} else {
3509 		plbrsp.CrcErrorCount = mr.mb[1];
3510 		plbrsp.DisparityErrorCount = mr.mb[2];
3511 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3512 		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
3513 	}
3514 
3515 	rval = ddi_copyout((void *)&plbrsp,
3516 	    (void *)(uintptr_t)cmd->ResponseAdr,
3517 	    sizeof (EXT_LOOPBACK_RSP), mode);
3518 	if (rval != 0) {
3519 		EL(ha, "failed, ddi_copyout-2\n");
3520 		cmd->Status = EXT_STATUS_COPY_ERR;
3521 		cmd->ResponseLen = 0;
3522 		return;
3523 	}
3524 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3525 
3526 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3527 }
3528 
3529 /*
3530  * ql_send_els_rnid
3531  *	IOCTL for extended link service RNID command.
3532  *
3533  * Input:
3534  *	ha:	adapter state pointer.
3535  *	cmd:	User space CT arguments pointer.
3536  *	mode:	flags.
3537  *
3538  * Returns:
3539  *	None, request status indicated in cmd->Status.
3540  *
3541  * Context:
3542  *	Kernel context.
3543  */
3544 static void
3545 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3546 {
3547 	EXT_RNID_REQ	tmp_rnid;
3548 	port_id_t	tmp_fcid;
3549 	caddr_t		tmp_buf, bptr;
3550 	uint32_t	copy_len;
3551 	ql_tgt_t	*tq;
3552 	EXT_RNID_DATA	rnid_data;
3553 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3554 	int		rval = 0;
3555 	uint32_t	local_hba = 0;
3556 
3557 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3558 
3559 	if (DRIVER_SUSPENDED(ha)) {
3560 		EL(ha, "failed, LOOP_NOT_READY\n");
3561 		cmd->Status = EXT_STATUS_BUSY;
3562 		cmd->ResponseLen = 0;
3563 		return;
3564 	}
3565 
3566 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3567 		/* parameter error */
3568 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3569 		    cmd->RequestLen);
3570 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3571 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3572 		cmd->ResponseLen = 0;
3573 		return;
3574 	}
3575 
3576 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3577 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3578 		EL(ha, "failed, ddi_copyin\n");
3579 		cmd->Status = EXT_STATUS_COPY_ERR;
3580 		cmd->ResponseLen = 0;
3581 		return;
3582 	}
3583 
3584 	/* Find loop ID of the device */
3585 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3586 		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3587 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3588 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3589 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3590 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3591 			local_hba = 1;
3592 		} else {
3593 			tq = ql_find_port(ha,
3594 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3595 		}
3596 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3597 		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3598 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3599 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3600 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3601 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3602 			local_hba = 1;
3603 		} else {
3604 			tq = ql_find_port(ha,
3605 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3606 		}
3607 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3608 		/*
3609 		 * Copy caller's d_id to tmp space.
3610 		 */
3611 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3612 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3613 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3614 
3615 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3616 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3617 			local_hba = 1;
3618 		} else {
3619 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3620 			    QLNT_PID);
3621 		}
3622 	}
3623 
3624 	/* Allocate memory for command. */
3625 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3626 	if (tmp_buf == NULL) {
3627 		EL(ha, "failed, kmem_zalloc\n");
3628 		cmd->Status = EXT_STATUS_NO_MEMORY;
3629 		cmd->ResponseLen = 0;
3630 		return;
3631 	}
3632 
3633 	if (local_hba) {
3634 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3635 		if (rval != QL_SUCCESS) {
3636 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3637 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3638 			cmd->Status = EXT_STATUS_ERR;
3639 			cmd->ResponseLen = 0;
3640 			return;
3641 		}
3642 
3643 		/* Save gotten RNID data. */
3644 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3645 
3646 		/* Now build the Send RNID response */
3647 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3648 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3649 		tmp_buf[2] = 0;
3650 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3651 
3652 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
3653 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3654 			    EXT_DEF_WWN_NAME_SIZE);
3655 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3656 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3657 			    EXT_DEF_WWN_NAME_SIZE);
3658 		} else {
3659 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3660 			    EXT_DEF_WWN_NAME_SIZE);
3661 			bcopy(ha->init_ctrl_blk.cb.node_name,
3662 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3663 			    EXT_DEF_WWN_NAME_SIZE);
3664 		}
3665 
3666 		bcopy((uint8_t *)&rnid_data,
3667 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3668 		    sizeof (EXT_RNID_DATA));
3669 	} else {
3670 		if (tq == NULL) {
3671 			/* no matching device */
3672 			EL(ha, "failed, device not found\n");
3673 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3674 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3675 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3676 			cmd->ResponseLen = 0;
3677 			return;
3678 		}
3679 
3680 		/* Send command */
3681 		rval = ql_send_rnid_els(ha, tq->loop_id,
3682 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3683 		if (rval != QL_SUCCESS) {
3684 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3685 			    rval, tq->loop_id);
3686 			while (LOOP_NOT_READY(ha)) {
3687 				ql_delay(ha, 100000);
3688 				if (loop_ready_wait-- == 0) {
3689 					EL(ha, "failed, loop not ready\n");
3690 					cmd->Status = EXT_STATUS_ERR;
3691 					cmd->ResponseLen = 0;
3692 				}
3693 			}
3694 			rval = ql_send_rnid_els(ha, tq->loop_id,
3695 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3696 			    tmp_buf);
3697 			if (rval != QL_SUCCESS) {
3698 				/* error */
3699 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3700 				    rval, tq->loop_id);
3701 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3702 				cmd->Status = EXT_STATUS_ERR;
3703 				cmd->ResponseLen = 0;
3704 				return;
3705 			}
3706 		}
3707 	}
3708 
3709 	/* Copy the response */
3710 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3711 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3712 
3713 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3714 	    copy_len, mode) != copy_len) {
3715 		cmd->Status = EXT_STATUS_COPY_ERR;
3716 		EL(ha, "failed, ddi_copyout\n");
3717 	} else {
3718 		cmd->ResponseLen = copy_len;
3719 		if (copy_len < SEND_RNID_RSP_SIZE) {
3720 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3721 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3722 
3723 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3724 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3725 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3726 		} else {
3727 			cmd->Status = EXT_STATUS_OK;
3728 			QL_PRINT_9(CE_CONT, "(%d): done\n",
3729 			    ha->instance);
3730 		}
3731 	}
3732 
3733 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3734 }
3735 
3736 /*
3737  * ql_set_host_data
3738  *	Process IOCTL subcommand to set host/adapter related data.
3739  *
3740  * Input:
3741  *	ha:	adapter state pointer.
3742  *	cmd:	User space CT arguments pointer.
3743  *	mode:	flags.
3744  *
3745  * Returns:
3746  *	None, request status indicated in cmd->Status.
3747  *
3748  * Context:
3749  *	Kernel context.
3750  */
3751 static void
3752 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3753 {
3754 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3755 	    cmd->SubCode);
3756 
3757 	/*
3758 	 * case off on command subcode
3759 	 */
3760 	switch (cmd->SubCode) {
3761 	case EXT_SC_SET_RNID:
3762 		ql_set_rnid_parameters(ha, cmd, mode);
3763 		break;
3764 	case EXT_SC_RST_STATISTICS:
3765 		(void) ql_reset_statistics(ha, cmd);
3766 		break;
3767 	case EXT_SC_SET_BEACON_STATE:
3768 		ql_set_led_state(ha, cmd, mode);
3769 		break;
3770 	case EXT_SC_SET_PARMS:
3771 	case EXT_SC_SET_BUS_MODE:
3772 	case EXT_SC_SET_DR_DUMP_BUF:
3773 	case EXT_SC_SET_RISC_CODE:
3774 	case EXT_SC_SET_FLASH_RAM:
3775 	case EXT_SC_SET_LUN_BITMASK:
3776 	case EXT_SC_SET_RETRY_CNT:
3777 	case EXT_SC_SET_RTIN:
3778 	case EXT_SC_SET_FC_LUN_BITMASK:
3779 	case EXT_SC_ADD_TARGET_DEVICE:
3780 	case EXT_SC_SWAP_TARGET_DEVICE:
3781 	case EXT_SC_SET_SEL_TIMEOUT:
3782 	default:
3783 		/* function not supported. */
3784 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3785 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3786 		break;
3787 	}
3788 
3789 	if (cmd->Status != EXT_STATUS_OK) {
3790 		EL(ha, "failed, Status=%d\n", cmd->Status);
3791 	} else {
3792 		/*EMPTY*/
3793 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3794 	}
3795 }
3796 
3797 /*
3798  * ql_get_host_data
3799  *	Performs EXT_CC_GET_DATA subcommands.
3800  *
3801  * Input:
3802  *	ha:	adapter state pointer.
3803  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3804  *	mode:	flags.
3805  *
3806  * Returns:
3807  *	None, request status indicated in cmd->Status.
3808  *
3809  * Context:
3810  *	Kernel context.
3811  */
3812 static void
3813 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3814 {
3815 	int	out_size = 0;
3816 
3817 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3818 	    cmd->SubCode);
3819 
3820 	/* case off on command subcode */
3821 	switch (cmd->SubCode) {
3822 	case EXT_SC_GET_STATISTICS:
3823 		out_size = sizeof (EXT_HBA_PORT_STAT);
3824 		break;
3825 	case EXT_SC_GET_FC_STATISTICS:
3826 		out_size = sizeof (EXT_HBA_PORT_STAT);
3827 		break;
3828 	case EXT_SC_GET_PORT_SUMMARY:
3829 		out_size = sizeof (EXT_DEVICEDATA);
3830 		break;
3831 	case EXT_SC_GET_RNID:
3832 		out_size = sizeof (EXT_RNID_DATA);
3833 		break;
3834 	case EXT_SC_GET_TARGET_ID:
3835 		out_size = sizeof (EXT_DEST_ADDR);
3836 		break;
3837 	case EXT_SC_GET_BEACON_STATE:
3838 		out_size = sizeof (EXT_BEACON_CONTROL);
3839 		break;
3840 	case EXT_SC_GET_FC4_STATISTICS:
3841 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3842 		break;
3843 	case EXT_SC_GET_DCBX_PARAM:
3844 		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3845 		break;
3846 	case EXT_SC_GET_RESOURCE_CNTS:
3847 		out_size = sizeof (EXT_RESOURCE_CNTS);
3848 		break;
3849 	case EXT_SC_GET_FCF_LIST:
3850 		out_size = sizeof (EXT_FCF_LIST);
3851 		break;
3852 	case EXT_SC_GET_SCSI_ADDR:
3853 	case EXT_SC_GET_ERR_DETECTIONS:
3854 	case EXT_SC_GET_BUS_MODE:
3855 	case EXT_SC_GET_DR_DUMP_BUF:
3856 	case EXT_SC_GET_RISC_CODE:
3857 	case EXT_SC_GET_FLASH_RAM:
3858 	case EXT_SC_GET_LINK_STATUS:
3859 	case EXT_SC_GET_LOOP_ID:
3860 	case EXT_SC_GET_LUN_BITMASK:
3861 	case EXT_SC_GET_PORT_DATABASE:
3862 	case EXT_SC_GET_PORT_DATABASE_MEM:
3863 	case EXT_SC_GET_POSITION_MAP:
3864 	case EXT_SC_GET_RETRY_CNT:
3865 	case EXT_SC_GET_RTIN:
3866 	case EXT_SC_GET_FC_LUN_BITMASK:
3867 	case EXT_SC_GET_SEL_TIMEOUT:
3868 	default:
3869 		/* function not supported. */
3870 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3871 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3872 		cmd->ResponseLen = 0;
3873 		return;
3874 	}
3875 
3876 	if (cmd->ResponseLen < out_size) {
3877 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3878 		cmd->DetailStatus = out_size;
3879 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3880 		    cmd->ResponseLen, out_size);
3881 		cmd->ResponseLen = 0;
3882 		return;
3883 	}
3884 
3885 	switch (cmd->SubCode) {
3886 	case EXT_SC_GET_RNID:
3887 		ql_get_rnid_parameters(ha, cmd, mode);
3888 		break;
3889 	case EXT_SC_GET_STATISTICS:
3890 		ql_get_statistics(ha, cmd, mode);
3891 		break;
3892 	case EXT_SC_GET_FC_STATISTICS:
3893 		ql_get_statistics_fc(ha, cmd, mode);
3894 		break;
3895 	case EXT_SC_GET_FC4_STATISTICS:
3896 		ql_get_statistics_fc4(ha, cmd, mode);
3897 		break;
3898 	case EXT_SC_GET_PORT_SUMMARY:
3899 		ql_get_port_summary(ha, cmd, mode);
3900 		break;
3901 	case EXT_SC_GET_TARGET_ID:
3902 		ql_get_target_id(ha, cmd, mode);
3903 		break;
3904 	case EXT_SC_GET_BEACON_STATE:
3905 		ql_get_led_state(ha, cmd, mode);
3906 		break;
3907 	case EXT_SC_GET_DCBX_PARAM:
3908 		ql_get_dcbx_parameters(ha, cmd, mode);
3909 		break;
3910 	case EXT_SC_GET_FCF_LIST:
3911 		ql_get_fcf_list(ha, cmd, mode);
3912 		break;
3913 	case EXT_SC_GET_RESOURCE_CNTS:
3914 		ql_get_resource_counts(ha, cmd, mode);
3915 		break;
3916 	}
3917 
3918 	if (cmd->Status != EXT_STATUS_OK) {
3919 		EL(ha, "failed, Status=%d\n", cmd->Status);
3920 	} else {
3921 		/*EMPTY*/
3922 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3923 	}
3924 }
3925 
3926 /* ******************************************************************** */
3927 /*			Helper Functions				*/
3928 /* ******************************************************************** */
3929 
3930 /*
3931  * ql_lun_count
3932  *	Get numbers of LUNS on target.
3933  *
3934  * Input:
3935  *	ha:	adapter state pointer.
3936  *	q:	device queue pointer.
3937  *
3938  * Returns:
3939  *	Number of LUNs.
3940  *
3941  * Context:
3942  *	Kernel context.
3943  */
3944 static int
3945 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3946 {
3947 	int	cnt;
3948 
3949 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3950 
3951 	/* Bypass LUNs that failed. */
3952 	cnt = ql_report_lun(ha, tq);
3953 	if (cnt == 0) {
3954 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3955 	}
3956 
3957 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3958 
3959 	return (cnt);
3960 }
3961 
3962 /*
3963  * ql_report_lun
3964  *	Get numbers of LUNS using report LUN command.
3965  *
3966  * Input:
3967  *	ha:	adapter state pointer.
3968  *	q:	target queue pointer.
3969  *
3970  * Returns:
3971  *	Number of LUNs.
3972  *
3973  * Context:
3974  *	Kernel context.
3975  */
3976 static int
3977 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
3978 {
3979 	int			rval;
3980 	uint8_t			retries;
3981 	ql_mbx_iocb_t		*pkt;
3982 	ql_rpt_lun_lst_t	*rpt;
3983 	dma_mem_t		dma_mem;
3984 	uint32_t		pkt_size, cnt;
3985 	uint16_t		comp_status;
3986 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
3987 
3988 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3989 
3990 	if (DRIVER_SUSPENDED(ha)) {
3991 		EL(ha, "failed, LOOP_NOT_READY\n");
3992 		return (0);
3993 	}
3994 
3995 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
3996 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
3997 	if (pkt == NULL) {
3998 		EL(ha, "failed, kmem_zalloc\n");
3999 		return (0);
4000 	}
4001 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4002 
4003 	/* Get DMA memory for the IOCB */
4004 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
4005 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4006 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4007 		    "alloc failed", QL_NAME, ha->instance);
4008 		kmem_free(pkt, pkt_size);
4009 		return (0);
4010 	}
4011 
4012 	for (retries = 0; retries < 4; retries++) {
4013 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4014 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4015 			pkt->cmd24.entry_count = 1;
4016 
4017 			/* Set N_port handle */
4018 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4019 
4020 			/* Set target ID */
4021 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4022 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4023 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4024 
4025 			/* Set ISP command timeout. */
4026 			pkt->cmd24.timeout = LE_16(15);
4027 
4028 			/* Load SCSI CDB */
4029 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
4030 			pkt->cmd24.scsi_cdb[6] =
4031 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4032 			pkt->cmd24.scsi_cdb[7] =
4033 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4034 			pkt->cmd24.scsi_cdb[8] =
4035 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4036 			pkt->cmd24.scsi_cdb[9] =
4037 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4038 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4039 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4040 				    + cnt, 4);
4041 			}
4042 
4043 			/* Set tag queue control flags */
4044 			pkt->cmd24.task = TA_STAG;
4045 
4046 			/* Set transfer direction. */
4047 			pkt->cmd24.control_flags = CF_RD;
4048 
4049 			/* Set data segment count. */
4050 			pkt->cmd24.dseg_count = LE_16(1);
4051 
4052 			/* Load total byte count. */
4053 			/* Load data descriptor. */
4054 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4055 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4056 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4057 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4058 			pkt->cmd24.total_byte_count =
4059 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4060 			pkt->cmd24.dseg_0_length =
4061 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4062 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4063 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4064 			pkt->cmd3.entry_count = 1;
4065 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4066 				pkt->cmd3.target_l = LSB(tq->loop_id);
4067 				pkt->cmd3.target_h = MSB(tq->loop_id);
4068 			} else {
4069 				pkt->cmd3.target_h = LSB(tq->loop_id);
4070 			}
4071 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4072 			pkt->cmd3.timeout = LE_16(15);
4073 			pkt->cmd3.dseg_count = LE_16(1);
4074 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4075 			pkt->cmd3.scsi_cdb[6] =
4076 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4077 			pkt->cmd3.scsi_cdb[7] =
4078 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4079 			pkt->cmd3.scsi_cdb[8] =
4080 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4081 			pkt->cmd3.scsi_cdb[9] =
4082 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4083 			pkt->cmd3.byte_count =
4084 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4085 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4086 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4087 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4088 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4089 			pkt->cmd3.dseg_0_length =
4090 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4091 		} else {
4092 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4093 			pkt->cmd.entry_count = 1;
4094 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4095 				pkt->cmd.target_l = LSB(tq->loop_id);
4096 				pkt->cmd.target_h = MSB(tq->loop_id);
4097 			} else {
4098 				pkt->cmd.target_h = LSB(tq->loop_id);
4099 			}
4100 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4101 			pkt->cmd.timeout = LE_16(15);
4102 			pkt->cmd.dseg_count = LE_16(1);
4103 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4104 			pkt->cmd.scsi_cdb[6] =
4105 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4106 			pkt->cmd.scsi_cdb[7] =
4107 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4108 			pkt->cmd.scsi_cdb[8] =
4109 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4110 			pkt->cmd.scsi_cdb[9] =
4111 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4112 			pkt->cmd.byte_count =
4113 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4114 			pkt->cmd.dseg_0_address = (uint32_t)
4115 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4116 			pkt->cmd.dseg_0_length =
4117 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4118 		}
4119 
4120 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4121 		    sizeof (ql_mbx_iocb_t));
4122 
4123 		/* Sync in coming DMA buffer. */
4124 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4125 		    DDI_DMA_SYNC_FORKERNEL);
4126 		/* Copy in coming DMA data. */
4127 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4128 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4129 
4130 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4131 			pkt->sts24.entry_status = (uint8_t)
4132 			    (pkt->sts24.entry_status & 0x3c);
4133 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4134 			scsi_status_h = pkt->sts24.scsi_status_h;
4135 			scsi_status_l = pkt->sts24.scsi_status_l;
4136 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4137 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4138 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4139 		} else {
4140 			pkt->sts.entry_status = (uint8_t)
4141 			    (pkt->sts.entry_status & 0x7e);
4142 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4143 			scsi_status_h = pkt->sts.scsi_status_h;
4144 			scsi_status_l = pkt->sts.scsi_status_l;
4145 			reqs = &pkt->sts.req_sense_data[0];
4146 		}
4147 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4148 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4149 			    pkt->sts.entry_status, tq->d_id.b24);
4150 			rval = QL_FUNCTION_PARAMETER_ERROR;
4151 		}
4152 
4153 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4154 		    scsi_status_l & STATUS_CHECK) {
4155 			/* Device underrun, treat as OK. */
4156 			if (rval == QL_SUCCESS &&
4157 			    comp_status == CS_DATA_UNDERRUN &&
4158 			    scsi_status_h & FCP_RESID_UNDER) {
4159 				break;
4160 			}
4161 
4162 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4163 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4164 			    comp_status, scsi_status_h, scsi_status_l);
4165 
4166 			if (rval == QL_SUCCESS) {
4167 				if ((comp_status == CS_TIMEOUT) ||
4168 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4169 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4170 					rval = QL_FUNCTION_TIMEOUT;
4171 					break;
4172 				}
4173 				rval = QL_FUNCTION_FAILED;
4174 			} else if (rval == QL_ABORTED) {
4175 				break;
4176 			}
4177 
4178 			if (scsi_status_l & STATUS_CHECK) {
4179 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4180 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4181 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4182 				    reqs[1], reqs[2], reqs[3], reqs[4],
4183 				    reqs[5], reqs[6], reqs[7], reqs[8],
4184 				    reqs[9], reqs[10], reqs[11], reqs[12],
4185 				    reqs[13], reqs[14], reqs[15], reqs[16],
4186 				    reqs[17]);
4187 			}
4188 		} else {
4189 			break;
4190 		}
4191 		bzero((caddr_t)pkt, pkt_size);
4192 	}
4193 
4194 	if (rval != QL_SUCCESS) {
4195 		EL(ha, "failed=%xh\n", rval);
4196 		rval = 0;
4197 	} else {
4198 		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
4199 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
4200 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4201 	}
4202 
4203 	kmem_free(pkt, pkt_size);
4204 	ql_free_dma_resource(ha, &dma_mem);
4205 
4206 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4207 
4208 	return (rval);
4209 }
4210 
4211 /*
4212  * ql_inq_scan
4213  *	Get numbers of LUNS using inquiry command.
4214  *
4215  * Input:
4216  *	ha:		adapter state pointer.
4217  *	tq:		target queue pointer.
4218  *	count:		scan for the number of existing LUNs.
4219  *
4220  * Returns:
4221  *	Number of LUNs.
4222  *
4223  * Context:
4224  *	Kernel context.
4225  */
4226 static int
4227 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4228 {
4229 	int		lun, cnt, rval;
4230 	ql_mbx_iocb_t	*pkt;
4231 	uint8_t		*inq;
4232 	uint32_t	pkt_size;
4233 
4234 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4235 
4236 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4237 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4238 	if (pkt == NULL) {
4239 		EL(ha, "failed, kmem_zalloc\n");
4240 		return (0);
4241 	}
4242 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4243 
4244 	cnt = 0;
4245 	for (lun = 0; lun < MAX_LUNS; lun++) {
4246 
4247 		if (DRIVER_SUSPENDED(ha)) {
4248 			rval = QL_LOOP_DOWN;
4249 			cnt = 0;
4250 			break;
4251 		}
4252 
4253 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4254 		if (rval == QL_SUCCESS) {
4255 			switch (*inq) {
4256 			case DTYPE_DIRECT:
4257 			case DTYPE_PROCESSOR:	/* Appliance. */
4258 			case DTYPE_WORM:
4259 			case DTYPE_RODIRECT:
4260 			case DTYPE_SCANNER:
4261 			case DTYPE_OPTICAL:
4262 			case DTYPE_CHANGER:
4263 			case DTYPE_ESI:
4264 				cnt++;
4265 				break;
4266 			case DTYPE_SEQUENTIAL:
4267 				cnt++;
4268 				tq->flags |= TQF_TAPE_DEVICE;
4269 				break;
4270 			default:
4271 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4272 				    "unsupported device id=%xh, lun=%d, "
4273 				    "type=%xh\n", ha->instance, tq->loop_id,
4274 				    lun, *inq);
4275 				break;
4276 			}
4277 
4278 			if (*inq == DTYPE_ESI || cnt >= count) {
4279 				break;
4280 			}
4281 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4282 			cnt = 0;
4283 			break;
4284 		}
4285 	}
4286 
4287 	kmem_free(pkt, pkt_size);
4288 
4289 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4290 
4291 	return (cnt);
4292 }
4293 
4294 /*
4295  * ql_inq
4296  *	Issue inquiry command.
4297  *
4298  * Input:
4299  *	ha:		adapter state pointer.
4300  *	tq:		target queue pointer.
4301  *	lun:		LUN number.
4302  *	pkt:		command and buffer pointer.
4303  *	inq_len:	amount of inquiry data.
4304  *
4305  * Returns:
4306  *	ql local function return status code.
4307  *
4308  * Context:
4309  *	Kernel context.
4310  */
static int
ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
    uint8_t inq_len)
{
	dma_mem_t	dma_mem;
	int		rval, retries;
	uint32_t	pkt_size, cnt;
	uint16_t	comp_status;
	uint8_t		scsi_status_h, scsi_status_l, *reqs;
	caddr_t		inq_data;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, loop down\n");
		return (QL_FUNCTION_TIMEOUT);
	}

	/*
	 * Caller supplies pkt with inq_len bytes of inquiry-data space
	 * immediately following the IOCB (see ql_inq_scan).
	 */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
	bzero((caddr_t)pkt, pkt_size);

	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		/*
		 * NOTE(review): returns 0 rather than a QL_* status code
		 * here; callers compare only against specific QL_* values,
		 * so 0 reads as a generic failure - confirm intended.
		 */
		return (0);
	}

	/* Issue the inquiry, retrying a few times on check conditions. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx/25xx/81xx: build a type 7 command IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number */
			pkt->cmd24.fcp_lun[2] = LSB(lun);
			pkt->cmd24.fcp_lun[3] = MSB(lun);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd24.scsi_cdb[4] = inq_len;
			/* Firmware expects the CDB byte-swapped per word. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			pkt->cmd24.total_byte_count = LE_32(inq_len);

			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_length = LE_32(inq_len);
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 64-bit DMA capable ISP2xxx: type 3 command IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			cnt = CMD_TYPE_3_DATA_SEGMENTS;

			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				/* Extended firmware uses 16-bit loop IDs. */
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(lun);
			pkt->cmd3.lun_h = MSB(lun);
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd3.scsi_cdb[4] = inq_len;
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.byte_count = LE_32(inq_len);
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length = LE_32(inq_len);
		} else {
			/* 32-bit DMA only: type 2 command IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			cnt = CMD_TYPE_2_DATA_SEGMENTS;

			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(lun);
			pkt->cmd.lun_h = MSB(lun);
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd.scsi_cdb[4] = inq_len;
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.byte_count = LE_32(inq_len);
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length = LE_32(inq_len);
		}

/*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming IOCB DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/*
		 * The IOCB is now a status entry; extract completion and
		 * SCSI status from the chip-generation-specific overlay.
		 */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows the FCP response, if present. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* Port-gone conditions are not retried. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Command completed cleanly; stop retrying. */
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
4506 
4507 /*
4508  * ql_get_buffer_data
 *	Copies data from user space to kernel buffer.
 *
 * Input:
 *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
4514  *	size:	Amount of data.
4515  *	mode:	flags.
4516  *
4517  * Returns:
4518  *	Returns number of bytes transferred.
4519  *
4520  * Context:
4521  *	Kernel context.
4522  */
4523 static uint32_t
4524 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4525 {
4526 	uint32_t	cnt;
4527 
4528 	for (cnt = 0; cnt < size; cnt++) {
4529 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4530 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4531 			break;
4532 		}
4533 	}
4534 
4535 	return (cnt);
4536 }
4537 
4538 /*
4539  * ql_send_buffer_data
 *	Copies data from kernel buffer to user space.
 *
 * Input:
 *	src:	Kernel source buffer address.
4544  *	dst:	User destination buffer address.
4545  *	size:	Amount of data.
4546  *	mode:	flags.
4547  *
4548  * Returns:
4549  *	Returns number of bytes transferred.
4550  *
4551  * Context:
4552  *	Kernel context.
4553  */
4554 static uint32_t
4555 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4556 {
4557 	uint32_t	cnt;
4558 
4559 	for (cnt = 0; cnt < size; cnt++) {
4560 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4561 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4562 			break;
4563 		}
4564 	}
4565 
4566 	return (cnt);
4567 }
4568 
4569 /*
4570  * ql_find_port
4571  *	Locates device queue.
4572  *
 * Input:
 *	ha:	adapter state pointer.
 *	name:	device name or ID to match (loop ID, WWPN, WWNN, or port ID).
 *	type:	name type (QLNT_LOOP_ID, QLNT_PORT, QLNT_NODE, or QLNT_PID).
4576  *
4577  * Returns:
4578  *	Returns target queue pointer.
4579  *
4580  * Context:
4581  *	Kernel context.
4582  */
4583 static ql_tgt_t *
4584 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4585 {
4586 	ql_link_t	*link;
4587 	ql_tgt_t	*tq;
4588 	uint16_t	index;
4589 
4590 	/* Scan port list for requested target */
4591 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4592 		for (link = ha->dev[index].first; link != NULL;
4593 		    link = link->next) {
4594 			tq = link->base_address;
4595 
4596 			switch (type) {
4597 			case QLNT_LOOP_ID:
4598 				if (bcmp(name, &tq->loop_id,
4599 				    sizeof (uint16_t)) == 0) {
4600 					return (tq);
4601 				}
4602 				break;
4603 			case QLNT_PORT:
4604 				if (bcmp(name, tq->port_name, 8) == 0) {
4605 					return (tq);
4606 				}
4607 				break;
4608 			case QLNT_NODE:
4609 				if (bcmp(name, tq->node_name, 8) == 0) {
4610 					return (tq);
4611 				}
4612 				break;
4613 			case QLNT_PID:
4614 				if (bcmp(name, tq->d_id.r.d_id,
4615 				    sizeof (tq->d_id.r.d_id)) == 0) {
4616 					return (tq);
4617 				}
4618 				break;
4619 			default:
4620 				EL(ha, "failed, invalid type=%d\n",  type);
4621 				return (NULL);
4622 			}
4623 		}
4624 	}
4625 
4626 	return (NULL);
4627 }
4628 
4629 /*
4630  * ql_24xx_flash_desc
4631  *	Get flash descriptor table.
4632  *
4633  * Input:
4634  *	ha:		adapter state pointer.
4635  *
4636  * Returns:
4637  *	ql local function return status code.
4638  *
4639  * Context:
4640  *	Kernel context.
4641  */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	flash_desc_t	*fdesc;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Without a descriptor address there is no table to read. */
	if (ha->flash_desc_addr == 0) {
		EL(ha, "desc ptr=0\n");
		return (QL_FUNCTION_FAILED);
	}

	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	/* Descriptor address is in 32-bit words; convert to bytes. */
	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
	    ha->flash_desc_addr << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	/*
	 * Checksum the raw (still little-endian) image as 16-bit words;
	 * a valid table sums to zero.  This must happen BEFORE the field
	 * byte-swaps below.
	 */
	chksum = 0;
	bp = (uint16_t *)fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp++;
		LITTLE_ENDIAN_16(&data);
		chksum += data;
	}

	/* Convert each table field from little-endian to host order. */
	LITTLE_ENDIAN_32(&fdesc->flash_valid);
	LITTLE_ENDIAN_16(&fdesc->flash_version);
	LITTLE_ENDIAN_16(&fdesc->flash_len);
	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
	LITTLE_ENDIAN_16(&fdesc->flash_id);
	LITTLE_ENDIAN_32(&fdesc->block_size);
	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
	LITTLE_ENDIAN_32(&fdesc->flash_size);
	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
	LITTLE_ENDIAN_32(&fdesc->read_timeout);

	/* flash size in desc table is in 1024 bytes */
	fdesc->flash_size = fdesc->flash_size * 0x400;

	/* Reject the table on bad checksum, magic, or version. */
	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
	    fdesc->flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	/* Publish the validated table into the per-adapter xioctl state. */
	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
	kmem_free(fdesc, sizeof (flash_desc_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
4707 
4708 /*
4709  * ql_setup_flash
4710  *	Gets the manufacturer and id number of the flash chip, and
4711  *	sets up the size parameter.
4712  *
4713  * Input:
4714  *	ha:	adapter state pointer.
4715  *
4716  * Returns:
4717  *	int:	ql local function return status code.
4718  *
4719  * Context:
4720  *	Kernel context.
4721  */
static int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Flash already identified on a previous call; nothing to do. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	/* 2200 boards with no subsystem vendor ID are not supported. */
	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_258081)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_focde.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->xioctl->fdesc.flash_size = 0x800000;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->xioctl->fdesc.flash_size = 0x200000;
		} else {
			ha->xioctl->fdesc.flash_size = 0x400000;
		}

		/* Prefer the on-flash descriptor table when it validates. */
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021 parts: fixed Winbond identity, no probing. */
			xp->fdesc.flash_manuf = WINBOND_FLASH;
			xp->fdesc.flash_id = WINBOND_FLASHID;
			xp->fdesc.flash_len = 0x17;
		} else {
			(void) ql_24xx_flash_id(ha);
		}

	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/*
		 * Legacy parts: issue the JEDEC software-ID command
		 * sequence to read the manufacturer and device IDs.
		 */
		ql_flash_enable(ha);

		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			/* SBus cards use different unlock addresses. */
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Exit software-ID mode (reset command 0xf0). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/*
	 * Map the probed manufacturer/device IDs to a flash size and,
	 * where the part requires it, part-specific command overrides.
	 */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* M25Pxx family: flash_len encodes log2(size). */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			/* This part uses 32K sectors and a 0x52 erase. */
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			/* Atmel parts need sector protect/unprotect cmds. */
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/* Try flash table later. */
	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "no default id\n");
		return (QL_SUCCESS);
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}
4977 
4978 /*
4979  * ql_flash_fcode_load
4980  *	Loads fcode data into flash from application.
4981  *
4982  * Input:
4983  *	ha:	adapter state pointer.
4984  *	bp:	user buffer address.
4985  *	size:	user buffer size.
4986  *	mode:	flags
4987  *
4988  * Returns:
4989  *
4990  * Context:
4991  *	Kernel context.
4992  */
4993 static int
4994 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
4995     int mode)
4996 {
4997 	uint8_t		*bfp;
4998 	ql_xioctl_t	*xp = ha->xioctl;
4999 	int		rval = 0;
5000 
5001 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5002 
5003 	if (bsize > xp->fdesc.flash_size) {
5004 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5005 		    xp->fdesc.flash_size);
5006 		return (ENOMEM);
5007 	}
5008 
5009 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5010 		EL(ha, "failed, kmem_zalloc\n");
5011 		rval = ENOMEM;
5012 	} else  {
5013 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5014 			EL(ha, "failed, ddi_copyin\n");
5015 			rval = EFAULT;
5016 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5017 			EL(ha, "failed, load_fcode\n");
5018 			rval = EFAULT;
5019 		} else {
5020 			/* Reset caches on all adapter instances. */
5021 			ql_update_flash_caches(ha);
5022 			rval = 0;
5023 		}
5024 		kmem_free(bfp, bsize);
5025 	}
5026 
5027 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5028 
5029 	return (rval);
5030 }
5031 
5032 /*
5033  * ql_load_fcode
5034  *	Loads fcode in to flash.
5035  *
5036  * Input:
5037  *	ha:	adapter state pointer.
5038  *	dp:	data pointer.
5039  *	size:	data length.
5040  *	addr:	flash byte address.
5041  *
5042  * Returns:
5043  *	ql local function return status code.
5044  *
5045  * Context:
5046  *	Kernel context.
5047  */
5048 int
5049 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
5050 {
5051 	uint32_t	cnt;
5052 	int		rval;
5053 
5054 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
5055 		return (ql_24xx_load_flash(ha, dp, size, addr));
5056 	}
5057 
5058 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5059 
5060 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5061 		/*
5062 		 * sbus has an additional check to make
5063 		 * sure they don't brick the HBA.
5064 		 */
5065 		if (dp[0] != 0xf1) {
5066 			EL(ha, "failed, incorrect fcode for sbus\n");
5067 			return (QL_FUNCTION_PARAMETER_ERROR);
5068 		}
5069 	}
5070 
5071 	GLOBAL_HW_LOCK();
5072 
5073 	/* Enable Flash Read/Write. */
5074 	ql_flash_enable(ha);
5075 
5076 	/* Erase flash prior to write. */
5077 	rval = ql_erase_flash(ha, 0);
5078 
5079 	if (rval == QL_SUCCESS) {
5080 		/* Write fcode data to flash. */
5081 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
5082 			/* Allow other system activity. */
5083 			if (cnt % 0x1000 == 0) {
5084 				drv_usecwait(1);
5085 			}
5086 			rval = ql_program_flash_address(ha, addr++, *dp++);
5087 			if (rval != QL_SUCCESS)
5088 				break;
5089 		}
5090 	}
5091 
5092 	ql_flash_disable(ha);
5093 
5094 	GLOBAL_HW_UNLOCK();
5095 
5096 	if (rval != QL_SUCCESS) {
5097 		EL(ha, "failed, rval=%xh\n", rval);
5098 	} else {
5099 		/*EMPTY*/
5100 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5101 	}
5102 	return (rval);
5103 }
5104 
5105 /*
5106  * ql_flash_fcode_dump
5107  *	Dumps FLASH to application.
5108  *
5109  * Input:
5110  *	ha:	adapter state pointer.
5111  *	bp:	user buffer address.
5112  *	bsize:	user buffer size
5113  *	faddr:	flash byte address
5114  *	mode:	flags
5115  *
5116  * Returns:
5117  *
5118  * Context:
5119  *	Kernel context.
5120  */
5121 static int
5122 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5123     uint32_t faddr, int mode)
5124 {
5125 	uint8_t		*bfp;
5126 	int		rval;
5127 	ql_xioctl_t	*xp = ha->xioctl;
5128 
5129 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5130 
5131 	/* adjust max read size to flash size */
5132 	if (bsize > xp->fdesc.flash_size) {
5133 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5134 		    xp->fdesc.flash_size);
5135 		bsize = xp->fdesc.flash_size;
5136 	}
5137 
5138 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5139 		EL(ha, "failed, kmem_zalloc\n");
5140 		rval = ENOMEM;
5141 	} else {
5142 		/* Dump Flash fcode. */
5143 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5144 
5145 		if (rval != QL_SUCCESS) {
5146 			EL(ha, "failed, dump_fcode = %x\n", rval);
5147 			rval = EFAULT;
5148 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5149 			EL(ha, "failed, ddi_copyout\n");
5150 			rval = EFAULT;
5151 		} else {
5152 			rval = 0;
5153 		}
5154 		kmem_free(bfp, bsize);
5155 	}
5156 
5157 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5158 
5159 	return (rval);
5160 }
5161 
5162 /*
5163  * ql_dump_fcode
5164  *	Dumps fcode from flash.
5165  *
5166  * Input:
5167  *	ha:		adapter state pointer.
5168  *	dp:		data pointer.
5169  *	size:		data length in bytes.
5170  *	startpos:	starting position in flash (byte address).
5171  *
5172  * Returns:
5173  *	ql local function return status code.
5174  *
5175  * Context:
5176  *	Kernel context.
5177  *
5178  */
5179 int
5180 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5181     uint32_t startpos)
5182 {
5183 	uint32_t	cnt, data, addr;
5184 	uint8_t		bp[4];
5185 	int		rval = QL_SUCCESS;
5186 
5187 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5188 
5189 	/* make sure startpos+size doesn't exceed flash */
5190 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5191 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5192 		    size, startpos, ha->xioctl->fdesc.flash_size);
5193 		return (QL_FUNCTION_PARAMETER_ERROR);
5194 	}
5195 
5196 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
5197 		/* check start addr is 32 bit aligned for 24xx */
5198 		if ((startpos & 0x3) != 0) {
5199 			rval = ql_24xx_read_flash(ha,
5200 			    ha->flash_data_addr | startpos >> 2, &data);
5201 			if (rval != QL_SUCCESS) {
5202 				EL(ha, "failed2, rval = %xh\n", rval);
5203 				return (rval);
5204 			}
5205 			bp[0] = LSB(LSW(data));
5206 			bp[1] = MSB(LSW(data));
5207 			bp[2] = LSB(MSW(data));
5208 			bp[3] = MSB(MSW(data));
5209 			while (size && startpos & 0x3) {
5210 				*dp++ = bp[startpos & 0x3];
5211 				startpos++;
5212 				size--;
5213 			}
5214 			if (size == 0) {
5215 				QL_PRINT_9(CE_CONT, "(%d): done2\n",
5216 				    ha->instance);
5217 				return (rval);
5218 			}
5219 		}
5220 
5221 		/* adjust 24xx start addr for 32 bit words */
5222 		addr = startpos / 4 | ha->flash_data_addr;
5223 	}
5224 
5225 	GLOBAL_HW_LOCK();
5226 
5227 	/* Enable Flash Read/Write. */
5228 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
5229 		ql_flash_enable(ha);
5230 	}
5231 
5232 	/* Read fcode data from flash. */
5233 	while (size) {
5234 		/* Allow other system activity. */
5235 		if (size % 0x1000 == 0) {
5236 			ql_delay(ha, 100000);
5237 		}
5238 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5239 			rval = ql_24xx_read_flash(ha, addr++, &data);
5240 			if (rval != QL_SUCCESS) {
5241 				break;
5242 			}
5243 			bp[0] = LSB(LSW(data));
5244 			bp[1] = MSB(LSW(data));
5245 			bp[2] = LSB(MSW(data));
5246 			bp[3] = MSB(MSW(data));
5247 			for (cnt = 0; size && cnt < 4; size--) {
5248 				*dp++ = bp[cnt++];
5249 			}
5250 		} else {
5251 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5252 			size--;
5253 		}
5254 	}
5255 
5256 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
5257 		ql_flash_disable(ha);
5258 	}
5259 
5260 	GLOBAL_HW_UNLOCK();
5261 
5262 	if (rval != QL_SUCCESS) {
5263 		EL(ha, "failed, rval = %xh\n", rval);
5264 	} else {
5265 		/*EMPTY*/
5266 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5267 	}
5268 	return (rval);
5269 }
5270 
5271 /*
5272  * ql_program_flash_address
5273  *	Program flash address.
5274  *
5275  * Input:
5276  *	ha:	adapter state pointer.
5277  *	addr:	flash byte address.
5278  *	data:	data to be written to flash.
5279  *
5280  * Returns:
5281  *	ql local function return status code.
5282  *
5283  * Context:
5284  *	Kernel context.
5285  */
5286 static int
5287 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5288     uint8_t data)
5289 {
5290 	int	rval;
5291 
5292 	/* Write Program Command Sequence */
5293 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5294 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5295 		ql_write_flash_byte(ha, addr, data);
5296 	} else {
5297 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5298 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5299 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5300 		ql_write_flash_byte(ha, addr, data);
5301 	}
5302 
5303 	/* Wait for write to complete. */
5304 	rval = ql_poll_flash(ha, addr, data);
5305 
5306 	if (rval != QL_SUCCESS) {
5307 		EL(ha, "failed, rval=%xh\n", rval);
5308 	}
5309 	return (rval);
5310 }
5311 
5312 /*
5313  * ql_set_rnid_parameters
5314  *	Set RNID parameters.
5315  *
5316  * Input:
5317  *	ha:	adapter state pointer.
5318  *	cmd:	User space CT arguments pointer.
5319  *	mode:	flags.
5320  */
5321 static void
5322 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5323 {
5324 	EXT_SET_RNID_REQ	tmp_set;
5325 	EXT_RNID_DATA		*tmp_buf;
5326 	int			rval = 0;
5327 
5328 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5329 
5330 	if (DRIVER_SUSPENDED(ha)) {
5331 		EL(ha, "failed, LOOP_NOT_READY\n");
5332 		cmd->Status = EXT_STATUS_BUSY;
5333 		cmd->ResponseLen = 0;
5334 		return;
5335 	}
5336 
5337 	cmd->ResponseLen = 0; /* NO response to caller. */
5338 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5339 		/* parameter error */
5340 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5341 		    cmd->RequestLen);
5342 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5343 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5344 		cmd->ResponseLen = 0;
5345 		return;
5346 	}
5347 
5348 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5349 	    cmd->RequestLen, mode);
5350 	if (rval != 0) {
5351 		EL(ha, "failed, ddi_copyin\n");
5352 		cmd->Status = EXT_STATUS_COPY_ERR;
5353 		cmd->ResponseLen = 0;
5354 		return;
5355 	}
5356 
5357 	/* Allocate memory for command. */
5358 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5359 	if (tmp_buf == NULL) {
5360 		EL(ha, "failed, kmem_zalloc\n");
5361 		cmd->Status = EXT_STATUS_NO_MEMORY;
5362 		cmd->ResponseLen = 0;
5363 		return;
5364 	}
5365 
5366 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5367 	    (caddr_t)tmp_buf);
5368 	if (rval != QL_SUCCESS) {
5369 		/* error */
5370 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5371 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5372 		cmd->Status = EXT_STATUS_ERR;
5373 		cmd->ResponseLen = 0;
5374 		return;
5375 	}
5376 
5377 	/* Now set the requested params. */
5378 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5379 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5380 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5381 
5382 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5383 	    (caddr_t)tmp_buf);
5384 	if (rval != QL_SUCCESS) {
5385 		/* error */
5386 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5387 		cmd->Status = EXT_STATUS_ERR;
5388 		cmd->ResponseLen = 0;
5389 	}
5390 
5391 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5392 
5393 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5394 }
5395 
5396 /*
5397  * ql_get_rnid_parameters
5398  *	Get RNID parameters.
5399  *
5400  * Input:
5401  *	ha:	adapter state pointer.
5402  *	cmd:	User space CT arguments pointer.
5403  *	mode:	flags.
5404  */
5405 static void
5406 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5407 {
5408 	EXT_RNID_DATA	*tmp_buf;
5409 	uint32_t	rval;
5410 
5411 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5412 
5413 	if (DRIVER_SUSPENDED(ha)) {
5414 		EL(ha, "failed, LOOP_NOT_READY\n");
5415 		cmd->Status = EXT_STATUS_BUSY;
5416 		cmd->ResponseLen = 0;
5417 		return;
5418 	}
5419 
5420 	/* Allocate memory for command. */
5421 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5422 	if (tmp_buf == NULL) {
5423 		EL(ha, "failed, kmem_zalloc\n");
5424 		cmd->Status = EXT_STATUS_NO_MEMORY;
5425 		cmd->ResponseLen = 0;
5426 		return;
5427 	}
5428 
5429 	/* Send command */
5430 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5431 	    (caddr_t)tmp_buf);
5432 	if (rval != QL_SUCCESS) {
5433 		/* error */
5434 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5435 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5436 		cmd->Status = EXT_STATUS_ERR;
5437 		cmd->ResponseLen = 0;
5438 		return;
5439 	}
5440 
5441 	/* Copy the response */
5442 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5443 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5444 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5445 		EL(ha, "failed, ddi_copyout\n");
5446 		cmd->Status = EXT_STATUS_COPY_ERR;
5447 		cmd->ResponseLen = 0;
5448 	} else {
5449 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5450 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5451 	}
5452 
5453 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5454 }
5455 
5456 /*
5457  * ql_reset_statistics
5458  *	Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5459  *
5460  * Input:
5461  *	ha:	adapter state pointer.
5462  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5463  *
5464  * Returns:
5465  *	None, request status indicated in cmd->Status.
5466  *
5467  * Context:
5468  *	Kernel context.
5469  */
5470 static int
5471 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5472 {
5473 	ql_xioctl_t		*xp = ha->xioctl;
5474 	int			rval = 0;
5475 
5476 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5477 
5478 	if (DRIVER_SUSPENDED(ha)) {
5479 		EL(ha, "failed, LOOP_NOT_READY\n");
5480 		cmd->Status = EXT_STATUS_BUSY;
5481 		cmd->ResponseLen = 0;
5482 		return (QL_FUNCTION_SUSPENDED);
5483 	}
5484 
5485 	rval = ql_reset_link_status(ha);
5486 	if (rval != QL_SUCCESS) {
5487 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5488 		cmd->Status = EXT_STATUS_MAILBOX;
5489 		cmd->DetailStatus = rval;
5490 		cmd->ResponseLen = 0;
5491 	}
5492 
5493 	TASK_DAEMON_LOCK(ha);
5494 	xp->IosRequested = 0;
5495 	xp->BytesRequested = 0;
5496 	xp->IOInputRequests = 0;
5497 	xp->IOOutputRequests = 0;
5498 	xp->IOControlRequests = 0;
5499 	xp->IOInputMByteCnt = 0;
5500 	xp->IOOutputMByteCnt = 0;
5501 	xp->IOOutputByteCnt = 0;
5502 	xp->IOInputByteCnt = 0;
5503 	TASK_DAEMON_UNLOCK(ha);
5504 
5505 	INTR_LOCK(ha);
5506 	xp->ControllerErrorCount = 0;
5507 	xp->DeviceErrorCount = 0;
5508 	xp->TotalLipResets = 0;
5509 	xp->TotalInterrupts = 0;
5510 	INTR_UNLOCK(ha);
5511 
5512 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5513 
5514 	return (rval);
5515 }
5516 
5517 /*
5518  * ql_get_statistics
5519  *	Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5520  *
5521  * Input:
5522  *	ha:	adapter state pointer.
5523  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5524  *	mode:	flags.
5525  *
5526  * Returns:
5527  *	None, request status indicated in cmd->Status.
5528  *
5529  * Context:
5530  *	Kernel context.
5531  */
5532 static void
5533 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5534 {
5535 	EXT_HBA_PORT_STAT	ps = {0};
5536 	ql_link_stats_t		*ls;
5537 	int			rval;
5538 	ql_xioctl_t		*xp = ha->xioctl;
5539 	int			retry = 10;
5540 
5541 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5542 
5543 	while (ha->task_daemon_flags &
5544 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5545 		ql_delay(ha, 10000000);	/* 10 second delay */
5546 
5547 		retry--;
5548 
5549 		if (retry == 0) { /* effectively 100 seconds */
5550 			EL(ha, "failed, LOOP_NOT_READY\n");
5551 			cmd->Status = EXT_STATUS_BUSY;
5552 			cmd->ResponseLen = 0;
5553 			return;
5554 		}
5555 	}
5556 
5557 	/* Allocate memory for command. */
5558 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5559 	if (ls == NULL) {
5560 		EL(ha, "failed, kmem_zalloc\n");
5561 		cmd->Status = EXT_STATUS_NO_MEMORY;
5562 		cmd->ResponseLen = 0;
5563 		return;
5564 	}
5565 
5566 	/*
5567 	 * I think these are supposed to be port statistics
5568 	 * the loop ID or port ID should be in cmd->Instance.
5569 	 */
5570 	rval = ql_get_status_counts(ha, (uint16_t)
5571 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5572 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5573 	if (rval != QL_SUCCESS) {
5574 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5575 		    ha->loop_id);
5576 		cmd->Status = EXT_STATUS_MAILBOX;
5577 		cmd->DetailStatus = rval;
5578 		cmd->ResponseLen = 0;
5579 	} else {
5580 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5581 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5582 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5583 		    xp->IOOutputRequests + xp->IOControlRequests);
5584 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5585 		    xp->IOOutputMByteCnt);
5586 		ps.LipResetCount = xp->TotalLipResets;
5587 		ps.InterruptCount = xp->TotalInterrupts;
5588 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5589 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5590 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5591 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5592 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5593 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5594 
5595 		rval = ddi_copyout((void *)&ps,
5596 		    (void *)(uintptr_t)cmd->ResponseAdr,
5597 		    sizeof (EXT_HBA_PORT_STAT), mode);
5598 		if (rval != 0) {
5599 			EL(ha, "failed, ddi_copyout\n");
5600 			cmd->Status = EXT_STATUS_COPY_ERR;
5601 			cmd->ResponseLen = 0;
5602 		} else {
5603 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5604 		}
5605 	}
5606 
5607 	kmem_free(ls, sizeof (ql_link_stats_t));
5608 
5609 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5610 }
5611 
5612 /*
5613  * ql_get_statistics_fc
5614  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5615  *
5616  * Input:
5617  *	ha:	adapter state pointer.
5618  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5619  *	mode:	flags.
5620  *
5621  * Returns:
5622  *	None, request status indicated in cmd->Status.
5623  *
5624  * Context:
5625  *	Kernel context.
5626  */
5627 static void
5628 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5629 {
5630 	EXT_HBA_PORT_STAT	ps = {0};
5631 	ql_link_stats_t		*ls;
5632 	int			rval;
5633 	uint16_t		qlnt;
5634 	EXT_DEST_ADDR		pextdestaddr;
5635 	uint8_t			*name;
5636 	ql_tgt_t		*tq = NULL;
5637 	int			retry = 10;
5638 
5639 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5640 
5641 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5642 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5643 		EL(ha, "failed, ddi_copyin\n");
5644 		cmd->Status = EXT_STATUS_COPY_ERR;
5645 		cmd->ResponseLen = 0;
5646 		return;
5647 	}
5648 
5649 	qlnt = QLNT_PORT;
5650 	name = pextdestaddr.DestAddr.WWPN;
5651 
5652 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5653 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5654 	    name[5], name[6], name[7]);
5655 
5656 	tq = ql_find_port(ha, name, qlnt);
5657 
5658 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5659 		EL(ha, "failed, fc_port not found\n");
5660 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5661 		cmd->ResponseLen = 0;
5662 		return;
5663 	}
5664 
5665 	while (ha->task_daemon_flags &
5666 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE  | DRIVER_STALL)) {
5667 		ql_delay(ha, 10000000);	/* 10 second delay */
5668 
5669 		retry--;
5670 
5671 		if (retry == 0) { /* effectively 100 seconds */
5672 			EL(ha, "failed, LOOP_NOT_READY\n");
5673 			cmd->Status = EXT_STATUS_BUSY;
5674 			cmd->ResponseLen = 0;
5675 			return;
5676 		}
5677 	}
5678 
5679 	/* Allocate memory for command. */
5680 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5681 	if (ls == NULL) {
5682 		EL(ha, "failed, kmem_zalloc\n");
5683 		cmd->Status = EXT_STATUS_NO_MEMORY;
5684 		cmd->ResponseLen = 0;
5685 		return;
5686 	}
5687 
5688 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5689 	    (caddr_t)ls, 0);
5690 	if (rval != QL_SUCCESS) {
5691 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5692 		    tq->d_id.b24);
5693 		cmd->Status = EXT_STATUS_MAILBOX;
5694 		cmd->DetailStatus = rval;
5695 		cmd->ResponseLen = 0;
5696 	} else {
5697 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5698 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5699 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5700 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5701 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5702 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5703 
5704 		rval = ddi_copyout((void *)&ps,
5705 		    (void *)(uintptr_t)cmd->ResponseAdr,
5706 		    sizeof (EXT_HBA_PORT_STAT), mode);
5707 
5708 		if (rval != 0) {
5709 			EL(ha, "failed, ddi_copyout\n");
5710 			cmd->Status = EXT_STATUS_COPY_ERR;
5711 			cmd->ResponseLen = 0;
5712 		} else {
5713 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5714 		}
5715 	}
5716 
5717 	kmem_free(ls, sizeof (ql_link_stats_t));
5718 
5719 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5720 }
5721 
5722 /*
5723  * ql_get_statistics_fc4
5724  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5725  *
5726  * Input:
5727  *	ha:	adapter state pointer.
5728  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5729  *	mode:	flags.
5730  *
5731  * Returns:
5732  *	None, request status indicated in cmd->Status.
5733  *
5734  * Context:
5735  *	Kernel context.
5736  */
5737 static void
5738 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5739 {
5740 	uint32_t		rval;
5741 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5742 	ql_xioctl_t		*xp = ha->xioctl;
5743 
5744 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5745 
5746 	fc4stats.InputRequests = xp->IOInputRequests;
5747 	fc4stats.OutputRequests = xp->IOOutputRequests;
5748 	fc4stats.ControlRequests = xp->IOControlRequests;
5749 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5750 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5751 
5752 	rval = ddi_copyout((void *)&fc4stats,
5753 	    (void *)(uintptr_t)cmd->ResponseAdr,
5754 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5755 
5756 	if (rval != 0) {
5757 		EL(ha, "failed, ddi_copyout\n");
5758 		cmd->Status = EXT_STATUS_COPY_ERR;
5759 		cmd->ResponseLen = 0;
5760 	} else {
5761 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5762 	}
5763 
5764 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5765 }
5766 
5767 /*
5768  * ql_set_led_state
5769  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5770  *
5771  * Input:
5772  *	ha:	adapter state pointer.
5773  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5774  *	mode:	flags.
5775  *
5776  * Returns:
5777  *	None, request status indicated in cmd->Status.
5778  *
5779  * Context:
5780  *	Kernel context.
5781  */
5782 static void
5783 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5784 {
5785 	EXT_BEACON_CONTROL	bstate;
5786 	uint32_t		rval;
5787 	ql_xioctl_t		*xp = ha->xioctl;
5788 
5789 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5790 
5791 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5792 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5793 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5794 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5795 		    " Len=%xh\n", cmd->RequestLen);
5796 		cmd->ResponseLen = 0;
5797 		return;
5798 	}
5799 
5800 	if (ha->device_id < 0x2300) {
5801 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5802 		cmd->DetailStatus = 0;
5803 		EL(ha, "done - failed, Invalid function for HBA model\n");
5804 		cmd->ResponseLen = 0;
5805 		return;
5806 	}
5807 
5808 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5809 	    cmd->RequestLen, mode);
5810 
5811 	if (rval != 0) {
5812 		cmd->Status = EXT_STATUS_COPY_ERR;
5813 		EL(ha, "done -  failed, ddi_copyin\n");
5814 		return;
5815 	}
5816 
5817 	switch (bstate.State) {
5818 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5819 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5820 			/* not quite an error -- LED state is already off */
5821 			cmd->Status = EXT_STATUS_OK;
5822 			EL(ha, "LED off request -- LED is already off\n");
5823 			break;
5824 		}
5825 
5826 		xp->ledstate.BeaconState = BEACON_OFF;
5827 		xp->ledstate.LEDflags = LED_ALL_OFF;
5828 
5829 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5830 			cmd->Status = EXT_STATUS_MAILBOX;
5831 		} else {
5832 			cmd->Status = EXT_STATUS_OK;
5833 		}
5834 		break;
5835 
5836 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5837 		if (xp->ledstate.BeaconState == BEACON_ON) {
5838 			/* not quite an error -- LED state is already on */
5839 			cmd->Status = EXT_STATUS_OK;
5840 			EL(ha, "LED on request  - LED is already on\n");
5841 			break;
5842 		}
5843 
5844 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5845 			cmd->Status = EXT_STATUS_MAILBOX;
5846 			break;
5847 		}
5848 
5849 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5850 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5851 		} else {
5852 			xp->ledstate.LEDflags = LED_GREEN;
5853 		}
5854 		xp->ledstate.BeaconState = BEACON_ON;
5855 
5856 		cmd->Status = EXT_STATUS_OK;
5857 		break;
5858 	default:
5859 		cmd->Status = EXT_STATUS_ERR;
5860 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5861 		break;
5862 	}
5863 
5864 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5865 }
5866 
5867 /*
5868  * ql_get_led_state
5869  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5870  *
5871  * Input:
5872  *	ha:	adapter state pointer.
5873  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5874  *	mode:	flags.
5875  *
5876  * Returns:
5877  *	None, request status indicated in cmd->Status.
5878  *
5879  * Context:
5880  *	Kernel context.
5881  */
5882 static void
5883 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5884 {
5885 	EXT_BEACON_CONTROL	bstate = {0};
5886 	uint32_t		rval;
5887 	ql_xioctl_t		*xp = ha->xioctl;
5888 
5889 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5890 
5891 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5892 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5893 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5894 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5895 		    "Len=%xh\n", cmd->ResponseLen);
5896 		cmd->ResponseLen = 0;
5897 		return;
5898 	}
5899 
5900 	if (ha->device_id < 0x2300) {
5901 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5902 		cmd->DetailStatus = 0;
5903 		EL(ha, "done - failed, Invalid function for HBA model\n");
5904 		cmd->ResponseLen = 0;
5905 		return;
5906 	}
5907 
5908 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5909 		cmd->Status = EXT_STATUS_BUSY;
5910 		EL(ha, "done -  failed, isp abort active\n");
5911 		cmd->ResponseLen = 0;
5912 		return;
5913 	}
5914 
5915 	/* inform the user of the current beacon state (off or on) */
5916 	bstate.State = xp->ledstate.BeaconState;
5917 
5918 	rval = ddi_copyout((void *)&bstate,
5919 	    (void *)(uintptr_t)cmd->ResponseAdr,
5920 	    sizeof (EXT_BEACON_CONTROL), mode);
5921 
5922 	if (rval != 0) {
5923 		EL(ha, "failed, ddi_copyout\n");
5924 		cmd->Status = EXT_STATUS_COPY_ERR;
5925 		cmd->ResponseLen = 0;
5926 	} else {
5927 		cmd->Status = EXT_STATUS_OK;
5928 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
5929 	}
5930 
5931 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5932 }
5933 
5934 /*
5935  * ql_blink_led
5936  *	Determine the next state of the LED and drive it
5937  *
5938  * Input:
5939  *	ha:	adapter state pointer.
5940  *
5941  * Context:
5942  *	Interrupt context.
5943  */
5944 void
5945 ql_blink_led(ql_adapter_state_t *ha)
5946 {
5947 	uint32_t		nextstate;
5948 	ql_xioctl_t		*xp = ha->xioctl;
5949 
5950 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5951 
5952 	if (xp->ledstate.BeaconState == BEACON_ON) {
5953 		/* determine the next led state */
5954 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5955 			nextstate = (xp->ledstate.LEDflags) &
5956 			    (~(RD32_IO_REG(ha, gpiod)));
5957 		} else {
5958 			nextstate = (xp->ledstate.LEDflags) &
5959 			    (~(RD16_IO_REG(ha, gpiod)));
5960 		}
5961 
5962 		/* turn the led on or off */
5963 		ql_drive_led(ha, nextstate);
5964 	}
5965 
5966 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5967 }
5968 
5969 /*
5970  * ql_drive_led
5971  *	drive the led's as determined by LEDflags
5972  *
5973  * Input:
5974  *	ha:		adapter state pointer.
5975  *	LEDflags:	LED flags
5976  *
5977  * Context:
5978  *	Kernel/Interrupt context.
5979  */
5980 static void
5981 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
5982 {
5983 
5984 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5985 
5986 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
5987 
5988 		uint16_t	gpio_enable, gpio_data;
5989 
5990 		/* setup to send new data */
5991 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
5992 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
5993 		WRT16_IO_REG(ha, gpioe, gpio_enable);
5994 
5995 		/* read current data and clear out old led data */
5996 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
5997 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
5998 
5999 		/* set in the new led data. */
6000 		gpio_data = (uint16_t)(gpio_data | LEDflags);
6001 
6002 		/* write out the new led data */
6003 		WRT16_IO_REG(ha, gpiod, gpio_data);
6004 
6005 	} else if (CFG_IST(ha, CFG_CTRL_24258081)) {
6006 
6007 		uint32_t	gpio_data;
6008 
6009 		/* setup to send new data */
6010 		gpio_data = RD32_IO_REG(ha, gpiod);
6011 		gpio_data |= LED_MASK_UPDATE_24;
6012 		WRT32_IO_REG(ha, gpiod, gpio_data);
6013 
6014 		/* read current data and clear out old led data */
6015 		gpio_data = RD32_IO_REG(ha, gpiod);
6016 		gpio_data &= ~LED_MASK_COLORS_24;
6017 
6018 		/* set in the new led data */
6019 		gpio_data |= LEDflags;
6020 
6021 		/* write out the new led data */
6022 		WRT32_IO_REG(ha, gpiod, gpio_data);
6023 
6024 	} else {
6025 		EL(ha, "unsupported HBA: %xh", ha->device_id);
6026 	}
6027 
6028 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6029 }
6030 
6031 /*
6032  * ql_setup_led
6033  *	Setup LED for driver control
6034  *
6035  * Input:
6036  *	ha:	adapter state pointer.
6037  *
6038  * Context:
6039  *	Kernel/Interrupt context.
6040  */
6041 static uint32_t
6042 ql_setup_led(ql_adapter_state_t *ha)
6043 {
6044 	uint32_t	rval;
6045 	ql_mbx_data_t	mr;
6046 
6047 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6048 
6049 	/* decouple the LED control from the fw */
6050 	rval = ql_get_firmware_option(ha, &mr);
6051 	if (rval != QL_SUCCESS) {
6052 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6053 		return (rval);
6054 	}
6055 
6056 	/* set the appropriate options */
6057 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
6058 
6059 	/* send it back to the firmware */
6060 	rval = ql_set_firmware_option(ha, &mr);
6061 	if (rval != QL_SUCCESS) {
6062 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6063 		return (rval);
6064 	}
6065 
6066 	/* initally, turn the LED's off */
6067 	ql_drive_led(ha, LED_ALL_OFF);
6068 
6069 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6070 
6071 	return (rval);
6072 }
6073 
6074 /*
6075  * ql_wrapup_led
6076  *	Return LED control to the firmware
6077  *
6078  * Input:
6079  *	ha:	adapter state pointer.
6080  *
6081  * Context:
6082  *	Kernel/Interrupt context.
6083  */
6084 static uint32_t
6085 ql_wrapup_led(ql_adapter_state_t *ha)
6086 {
6087 	uint32_t	rval;
6088 	ql_mbx_data_t	mr;
6089 
6090 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6091 
6092 	/* Turn all LED's off */
6093 	ql_drive_led(ha, LED_ALL_OFF);
6094 
6095 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
6096 
6097 		uint32_t	gpio_data;
6098 
6099 		/* disable the LED update mask */
6100 		gpio_data = RD32_IO_REG(ha, gpiod);
6101 		gpio_data &= ~LED_MASK_UPDATE_24;
6102 
6103 		/* write out the data */
6104 		WRT32_IO_REG(ha, gpiod, gpio_data);
6105 	}
6106 
6107 	/* give LED control back to the f/w */
6108 	rval = ql_get_firmware_option(ha, &mr);
6109 	if (rval != QL_SUCCESS) {
6110 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6111 		return (rval);
6112 	}
6113 
6114 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6115 
6116 	rval = ql_set_firmware_option(ha, &mr);
6117 	if (rval != QL_SUCCESS) {
6118 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6119 		return (rval);
6120 	}
6121 
6122 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6123 
6124 	return (rval);
6125 }
6126 
6127 /*
6128  * ql_get_port_summary
6129  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
6130  *
6131  *	The EXT_IOCTL->RequestAdr points to a single
6132  *	UINT32 which identifies the device type.
6133  *
6134  * Input:
6135  *	ha:	adapter state pointer.
6136  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6137  *	mode:	flags.
6138  *
6139  * Returns:
6140  *	None, request status indicated in cmd->Status.
6141  *
6142  * Context:
6143  *	Kernel context.
6144  */
6145 static void
6146 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6147 {
6148 	EXT_DEVICEDATA		dd = {0};
6149 	EXT_DEVICEDATA		*uddp;
6150 	ql_link_t		*link;
6151 	ql_tgt_t		*tq;
6152 	uint32_t		rlen, dev_type, index;
6153 	int			rval = 0;
6154 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6155 
6156 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6157 
6158 	ddep = &dd.EntryList[0];
6159 
6160 	/*
6161 	 * Get the type of device the requestor is looking for.
6162 	 *
6163 	 * We ignore this for now.
6164 	 */
6165 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6166 	    (void *)&dev_type, sizeof (dev_type), mode);
6167 	if (rval != 0) {
6168 		cmd->Status = EXT_STATUS_COPY_ERR;
6169 		cmd->ResponseLen = 0;
6170 		EL(ha, "failed, ddi_copyin\n");
6171 		return;
6172 	}
6173 	/*
6174 	 * Count the number of entries to be returned. Count devices
6175 	 * that are offlline, but have been persistently bound.
6176 	 */
6177 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6178 		for (link = ha->dev[index].first; link != NULL;
6179 		    link = link->next) {
6180 			tq = link->base_address;
6181 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6182 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6183 				continue;	/* Skip this one */
6184 			}
6185 			dd.TotalDevices++;
6186 		}
6187 	}
6188 	/*
6189 	 * Compute the number of entries that can be returned
6190 	 * based upon the size of caller's response buffer.
6191 	 */
6192 	dd.ReturnListEntryCount = 0;
6193 	if (dd.TotalDevices == 0) {
6194 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6195 	} else {
6196 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6197 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6198 	}
6199 	if (rlen > cmd->ResponseLen) {
6200 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6201 		cmd->DetailStatus = rlen;
6202 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6203 		    rlen, cmd->ResponseLen);
6204 		cmd->ResponseLen = 0;
6205 		return;
6206 	}
6207 	cmd->ResponseLen = 0;
6208 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6209 	uddep = &uddp->EntryList[0];
6210 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6211 		for (link = ha->dev[index].first; link != NULL;
6212 		    link = link->next) {
6213 			tq = link->base_address;
6214 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6215 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6216 				continue;	/* Skip this one */
6217 			}
6218 
6219 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6220 
6221 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6222 			bcopy(tq->port_name, ddep->PortWWN, 8);
6223 
6224 			ddep->PortID[0] = tq->d_id.b.domain;
6225 			ddep->PortID[1] = tq->d_id.b.area;
6226 			ddep->PortID[2] = tq->d_id.b.al_pa;
6227 
6228 			bcopy(tq->port_name,
6229 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6230 
6231 			ddep->DeviceFlags = tq->flags;
6232 			ddep->LoopID = tq->loop_id;
6233 			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6234 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6235 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6236 			    ha->instance, ddep->TargetAddress.Target,
6237 			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6238 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6239 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6240 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6241 			    ddep->PortWWN[0], ddep->PortWWN[1],
6242 			    ddep->PortWWN[2], ddep->PortWWN[3],
6243 			    ddep->PortWWN[4], ddep->PortWWN[5],
6244 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6245 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6246 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6247 
6248 			if (rval != 0) {
6249 				cmd->Status = EXT_STATUS_COPY_ERR;
6250 				cmd->ResponseLen = 0;
6251 				EL(ha, "failed, ddi_copyout\n");
6252 				break;
6253 			}
6254 			dd.ReturnListEntryCount++;
6255 			uddep++;
6256 			cmd->ResponseLen += (uint32_t)
6257 			    sizeof (EXT_DEVICEDATAENTRY);
6258 		}
6259 	}
6260 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6261 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6262 
6263 	if (rval != 0) {
6264 		cmd->Status = EXT_STATUS_COPY_ERR;
6265 		cmd->ResponseLen = 0;
6266 		EL(ha, "failed, ddi_copyout-2\n");
6267 	} else {
6268 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6269 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6270 	}
6271 }
6272 
6273 /*
6274  * ql_get_target_id
 *	Performs the EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6276  *
6277  * Input:
6278  *	ha:	adapter state pointer.
6279  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6280  *	mode:	flags.
6281  *
6282  * Returns:
6283  *	None, request status indicated in cmd->Status.
6284  *
6285  * Context:
6286  *	Kernel context.
6287  */
6288 static void
6289 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6290 {
6291 	uint32_t		rval;
6292 	uint16_t		qlnt;
6293 	EXT_DEST_ADDR		extdestaddr = {0};
6294 	uint8_t			*name;
6295 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6296 	ql_tgt_t		*tq;
6297 
6298 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6299 
6300 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6301 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6302 		EL(ha, "failed, ddi_copyin\n");
6303 		cmd->Status = EXT_STATUS_COPY_ERR;
6304 		cmd->ResponseLen = 0;
6305 		return;
6306 	}
6307 
6308 	qlnt = QLNT_PORT;
6309 	name = wwpn;
6310 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6311 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6312 	    name[5], name[6], name[7]);
6313 
6314 	tq = ql_find_port(ha, name, qlnt);
6315 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6316 		EL(ha, "failed, fc_port not found\n");
6317 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6318 		cmd->ResponseLen = 0;
6319 		return;
6320 	}
6321 
6322 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6323 
6324 	rval = ddi_copyout((void *)&extdestaddr,
6325 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6326 	if (rval != 0) {
6327 		EL(ha, "failed, ddi_copyout\n");
6328 		cmd->Status = EXT_STATUS_COPY_ERR;
6329 		cmd->ResponseLen = 0;
6330 	}
6331 
6332 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6333 }
6334 
6335 /*
6336  * ql_setup_fcache
6337  *	Populates selected flash sections into the cache
6338  *
6339  * Input:
6340  *	ha = adapter state pointer.
6341  *
6342  * Returns:
6343  *	ql local function return status code.
6344  *
6345  * Context:
6346  *	Kernel context.
6347  *
6348  * Note:
6349  *	Driver must be in stalled state prior to entering or
6350  *	add code to this function prior to calling ql_setup_flash()
6351  */
int
ql_setup_fcache(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	freadpos = 0;	/* byte position of next flash read */
	uint32_t	fw_done = 0;	/* firmware section handled yet? */
	ql_fcache_t	*head = NULL;	/* list of cache nodes being built */
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	CACHE_LOCK(ha);

	/* If we already have populated it, rtn */
	if (ha->fcache != NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "buffer already populated\n");
		return (QL_SUCCESS);
	}

	/* Establish per-chip default flash/NVRAM addresses first. */
	ql_flash_nvram_defaults(ha);

	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "unable to setup flash; rval=%xh\n", rval);
		return (rval);
	}

	/*
	 * Walk the flash image by image; ql_check_pci() advances
	 * freadpos to the next PCI image header and sets it to
	 * 0xffffffff after the last image.
	 */
	while (freadpos != 0xffffffff) {

		/*
		 * 8021 parts do not use the PCI-header walk; get the
		 * layout directly from the flash layout table instead.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			EL(ha, "8021 flash header break\n");
			ql_process_flt(ha, FLASH_8021_LAYOUT_TABLE << 2);
			(void) ql_24xx_flash_desc(ha);
			rval = QL_NOT_SUPPORTED;
			break;
		}
		/* Allocate & populate this node */

		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx/25xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				freadpos = ha->flash_fw_addr << 2;
			}
			fw_done = 1;
		}

		/* Read this section's first FBUFSIZE bytes into the node. */
		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
		    freadpos)) != QL_SUCCESS) {
			EL(ha, "failed, 24xx dump_fcode"
			    " pos=%xh rval=%xh\n", freadpos, rval);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/* release all resources we have */
		ftmp = head;
		while (ftmp != NULL) {
			tail = ftmp->next;
			kmem_free(ftmp->buf, FBUFSIZE);
			kmem_free(ftmp, sizeof (ql_fcache_t));
			ftmp = tail;
		}

		EL(ha, "failed, done\n");
	} else {
		/* Publish the completed list while the lock is held. */
		ha->fcache = head;
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	CACHE_UNLOCK(ha);

	return (rval);
}
6449 
6450 /*
6451  * ql_update_fcache
6452  *	re-populates updated flash into the fcache. If
6453  *	fcache does not exist (e.g., flash was empty/invalid on
6454  *	boot), this routine will create and the populate it.
6455  *
6456  * Input:
6457  *	ha	= adapter state pointer.
 *	bfp	= Pointer to flash buffer.
6459  *	bsize	= Size of flash buffer.
6460  *
6461  * Returns:
6462  *
6463  * Context:
6464  *	Kernel context.
6465  */
6466 void
6467 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6468 {
6469 	int		rval = QL_SUCCESS;
6470 	uint32_t	freadpos = 0;
6471 	uint32_t	fw_done = 0;
6472 	ql_fcache_t	*head = NULL;
6473 	ql_fcache_t	*tail = NULL;
6474 	ql_fcache_t	*ftmp;
6475 
6476 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6477 
6478 	while (freadpos != 0xffffffff) {
6479 
6480 		/* Allocate & populate this node */
6481 
6482 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6483 			EL(ha, "node alloc failed\n");
6484 			rval = QL_FUNCTION_FAILED;
6485 			break;
6486 		}
6487 
6488 		/* link in the new node */
6489 		if (head == NULL) {
6490 			head = tail = ftmp;
6491 		} else {
6492 			tail->next = ftmp;
6493 			tail = ftmp;
6494 		}
6495 
6496 		/* Do the firmware node first for 24xx's */
6497 		if (fw_done == 0) {
6498 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
6499 				freadpos = ha->flash_fw_addr << 2;
6500 			}
6501 			fw_done = 1;
6502 		}
6503 
6504 		/* read in first FBUFSIZE bytes of this flash section */
6505 		if (freadpos+FBUFSIZE > bsize) {
6506 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6507 			    freadpos, bsize);
6508 			rval = QL_FUNCTION_FAILED;
6509 			break;
6510 		}
6511 		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6512 
6513 		/* checkout the pci data / format */
6514 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6515 			EL(ha, "flash header incorrect\n");
6516 			rval = QL_FUNCTION_FAILED;
6517 			break;
6518 		}
6519 	}
6520 
6521 	if (rval != QL_SUCCESS) {
6522 		/*
6523 		 * release all resources we have
6524 		 */
6525 		ql_fcache_rel(head);
6526 		EL(ha, "failed, done\n");
6527 	} else {
6528 		/*
6529 		 * Release previous fcache resources and update with new
6530 		 */
6531 		CACHE_LOCK(ha);
6532 		ql_fcache_rel(ha->fcache);
6533 		ha->fcache = head;
6534 		CACHE_UNLOCK(ha);
6535 
6536 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6537 	}
6538 }
6539 
6540 /*
6541  * ql_setup_fnode
6542  *	Allocates fcache node
6543  *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	Pointer to the allocated fcache node (NULL = failed).
 *
6550  * Context:
6551  *	Kernel context.
6552  *
6553  * Note:
6554  *	Driver must be in stalled state prior to entering or
6555  *	add code to this function prior to calling ql_setup_flash()
6556  */
6557 static ql_fcache_t *
6558 ql_setup_fnode(ql_adapter_state_t *ha)
6559 {
6560 	ql_fcache_t	*fnode = NULL;
6561 
6562 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6563 	    KM_SLEEP))) == NULL) {
6564 		EL(ha, "fnode alloc failed\n");
6565 		fnode = NULL;
6566 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6567 	    KM_SLEEP))) == NULL) {
6568 		EL(ha, "buf alloc failed\n");
6569 		kmem_free(fnode, sizeof (ql_fcache_t));
6570 		fnode = NULL;
6571 	} else {
6572 		fnode->buflen = FBUFSIZE;
6573 	}
6574 
6575 	return (fnode);
6576 }
6577 
6578 /*
6579  * ql_fcache_rel
6580  *	Releases the fcache resources
6581  *
6582  * Input:
6583  *	ha	= adapter state pointer.
6584  *	head	= Pointer to fcache linked list
6585  *
6586  * Returns:
6587  *
6588  * Context:
6589  *	Kernel context.
6590  *
6591  */
6592 void
6593 ql_fcache_rel(ql_fcache_t *head)
6594 {
6595 	ql_fcache_t	*ftmp = head;
6596 	ql_fcache_t	*tail;
6597 
6598 	/* release all resources we have */
6599 	while (ftmp != NULL) {
6600 		tail = ftmp->next;
6601 		kmem_free(ftmp->buf, FBUFSIZE);
6602 		kmem_free(ftmp, sizeof (ql_fcache_t));
6603 		ftmp = tail;
6604 	}
6605 }
6606 
6607 /*
6608  * ql_update_flash_caches
6609  *	Updates driver flash caches
6610  *
6611  * Input:
6612  *	ha:	adapter state pointer.
6613  *
6614  * Context:
6615  *	Kernel context.
6616  */
6617 static void
6618 ql_update_flash_caches(ql_adapter_state_t *ha)
6619 {
6620 	uint32_t		len;
6621 	ql_link_t		*link;
6622 	ql_adapter_state_t	*ha2;
6623 
6624 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6625 
6626 	/* Get base path length. */
6627 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6628 		if (ha->devpath[len] == ',' ||
6629 		    ha->devpath[len] == '@') {
6630 			break;
6631 		}
6632 	}
6633 
6634 	/* Reset fcache on all adapter instances. */
6635 	for (link = ql_hba.first; link != NULL; link = link->next) {
6636 		ha2 = link->base_address;
6637 
6638 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6639 			continue;
6640 		}
6641 
6642 		CACHE_LOCK(ha2);
6643 		ql_fcache_rel(ha2->fcache);
6644 		ha2->fcache = NULL;
6645 
6646 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
6647 			if (ha2->vcache != NULL) {
6648 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6649 				ha2->vcache = NULL;
6650 			}
6651 		}
6652 		CACHE_UNLOCK(ha2);
6653 
6654 		(void) ql_setup_fcache(ha2);
6655 	}
6656 
6657 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6658 }
6659 
6660 /*
6661  * ql_get_fbuf
6662  *	Search the fcache list for the type specified
6663  *
6664  * Input:
6665  *	fptr	= Pointer to fcache linked list
6666  *	ftype	= Type of image to be returned.
6667  *
6668  * Returns:
6669  *	Pointer to ql_fcache_t.
6670  *	NULL means not found.
6671  *
6672  * Context:
6673  *	Kernel context.
6674  *
6675  *
6676  */
6677 ql_fcache_t *
6678 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6679 {
6680 	while (fptr != NULL) {
6681 		/* does this image meet criteria? */
6682 		if (ftype & fptr->type) {
6683 			break;
6684 		}
6685 		fptr = fptr->next;
6686 	}
6687 	return (fptr);
6688 }
6689 
6690 /*
6691  * ql_check_pci
6692  *
6693  *	checks the passed buffer for a valid pci signature and
6694  *	expected (and in range) pci length values.
6695  *
6696  *	For firmware type, a pci header is added since the image in
6697  *	the flash does not have one (!!!).
6698  *
6699  *	On successful pci check, nextpos adjusted to next pci header.
6700  *
6701  * Returns:
6702  *	-1 --> last pci image
6703  *	0 --> pci header valid
6704  *	1 --> pci header invalid.
6705  *
6706  * Context:
6707  *	Kernel context.
6708  */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;	/* offset of PCI data structure in buffer */
	uint8_t		*pciinfo;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Work on the cache node's data buffer. */
	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		/* Sbus cards carry fcode only; no PCI image headers. */
		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/* Version string comes from the "version" property. */
		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		/* Sbus flash has a single image; terminate the walk. */
		*nextpos = 0xffffffff;

		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
		    ha->instance);

		return (0);
	}

	/* Firmware section: it has no PCI header in flash, so make one. */
	if (*nextpos == ha->flash_fw_addr << 2) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		fwh.signature[1] = PCI_HEADER1;
		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));

		fwd.signature[0] = 'P';
		fwd.signature[1] = 'C';
		fwd.signature[2] = 'I';
		fwd.signature[3] = 'R';
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/*
		 * Assemble: synthetic header, synthetic data structure,
		 * then the firmware image (truncated by the prepended
		 * structures), and copy the result back over the node.
		 */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		/*
		 * Version bytes are read at fixed offsets of the
		 * rebuilt buffer; presumably the firmware's
		 * major/minor/subminor fields — TODO confirm layout.
		 */
		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		/* 8081 parts continue at 0x200000; others restart at 0. */
		*nextpos = CFG_IST(ha, CFG_CTRL_8081) ? 0x200000 : 0;
		kmem_free(buf, FBUFSIZE);

		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);

		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	/* 16-bit offset (low byte first) to the PCI data structure. */
	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		EL(ha, "last image\n");
		/* Final image: locate the flash layout table, then stop. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			ql_flash_layout_table(ha, *nextpos +
			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
			    8)) * PCI_SECTOR_SIZE);
			(void) ql_24xx_flash_desc(ha);
		}
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	/* Classify the image by the PCI data structure code type. */
	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
6874 
6875 /*
6876  * ql_flash_layout_table
6877  *	Obtains flash addresses from table
6878  *
6879  * Input:
6880  *	ha:		adapter state pointer.
6881  *	flt_paddr:	flash layout pointer address.
6882  *
6883  * Context:
6884  *	Kernel context.
6885  */
static void
ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
{
	ql_flt_ptr_t	*fptr;
	uint8_t		*bp;
	int		rval;
	uint32_t	len, faddr, cnt;
	uint16_t	chksum, w16;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Process flash layout table header */
	len = sizeof (ql_flt_ptr_t);
	if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return;
	}

	/* Process pointer to flash layout table */
	if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
		    rval);
		kmem_free(bp, len);
		return;
	}
	fptr = (ql_flt_ptr_t *)bp;

	/*
	 * Verify pointer to flash layout table: the 16-bit word
	 * checksum over the structure must be zero and the signature
	 * must be "QFLT".
	 */
	for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
		chksum += w16;
	}
	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
		kmem_free(bp, len);
		return;
	}
	/* Assemble the 32-bit flash address of the layout table. */
	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
	    fptr->addr[3]);

	kmem_free(bp, len);

	/* Parse the table the pointer references. */
	ql_process_flt(ha, faddr);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
6934 
6935 /*
6936  * ql_process_flt
6937  *	Obtains flash addresses from flash layout table
6938  *
6939  * Input:
6940  *	ha:	adapter state pointer.
6941  *	faddr:	flash layout table byte address.
6942  *
6943  * Context:
6944  *	Kernel context.
6945  */
6946 static void
6947 ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
6948 {
6949 	ql_flt_hdr_t	*fhdr;
6950 	ql_flt_region_t	*frgn;
6951 	uint8_t		*bp, *eaddr;
6952 	int		rval;
6953 	uint32_t	len, cnt, fe_addr;
6954 	uint16_t	chksum, w16;
6955 
6956 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6957 
6958 	/* Process flash layout table header */
6959 	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
6960 		EL(ha, "kmem_zalloc=null\n");
6961 		return;
6962 	}
6963 	fhdr = (ql_flt_hdr_t *)bp;
6964 
6965 	/* Process flash layout table. */
6966 	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
6967 	    QL_SUCCESS) {
6968 		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
6969 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6970 		return;
6971 	}
6972 	fhdr = (ql_flt_hdr_t *)bp;
6973 
6974 	/* Verify flash layout table. */
6975 	len = (uint16_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
6976 	    sizeof (ql_flt_hdr_t));
6977 	if (len > FLASH_LAYOUT_TABLE_SIZE) {
6978 		chksum = 0xffff;
6979 	} else {
6980 		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6981 			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6982 			chksum += w16;
6983 		}
6984 	}
6985 	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
6986 	if (chksum != 0 || w16 != 1) {
6987 		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
6988 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6989 		return;
6990 	}
6991 
6992 	/* Process flash layout table regions */
6993 	eaddr = bp + sizeof (ql_flt_hdr_t) + len;
6994 	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
6995 	    (uint8_t *)frgn < eaddr; frgn++) {
6996 		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
6997 		    frgn->beg_addr[2], frgn->beg_addr[3]);
6998 		faddr >>= 2;
6999 		fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
7000 		    frgn->end_addr[2], frgn->end_addr[3]);
7001 		fe_addr >>= 2;
7002 
7003 		switch (frgn->region) {
7004 		case FLASH_8021_BOOTLOADER_REGION:
7005 			ha->bootloader_addr = faddr;
7006 			ha->bootloader_size = (fe_addr - faddr) + 1;
7007 			QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, "
7008 			    "size=%xh\n", ha->instance, faddr,
7009 			    ha->bootloader_size);
7010 			break;
7011 		case FLASH_FW_REGION:
7012 		case FLASH_8021_FW_REGION:
7013 			ha->flash_fw_addr = faddr;
7014 			ha->flash_fw_size = (fe_addr - faddr) + 1;
7015 			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, "
7016 			    "size=%xh\n", ha->instance, faddr,
7017 			    ha->flash_fw_size);
7018 			break;
7019 		case FLASH_GOLDEN_FW_REGION:
7020 		case FLASH_8021_GOLDEN_FW_REGION:
7021 			ha->flash_golden_fw_addr = faddr;
7022 			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
7023 			    ha->instance, faddr);
7024 			break;
7025 		case FLASH_8021_VPD_REGION:
7026 			ha->flash_vpd_addr = faddr;
7027 			QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_addr=%xh\n",
7028 			    ha->instance, faddr);
7029 			break;
7030 		case FLASH_VPD_0_REGION:
7031 			if (!(ha->flags & FUNCTION_1) &&
7032 			    !(CFG_IST(ha, CFG_CTRL_8021))) {
7033 				ha->flash_vpd_addr = faddr;
7034 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
7035 				    "\n", ha->instance, faddr);
7036 			}
7037 			break;
7038 		case FLASH_NVRAM_0_REGION:
7039 			if (!(ha->flags & FUNCTION_1)) {
7040 				ha->flash_nvram_addr = faddr;
7041 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
7042 				    "%xh\n", ha->instance, faddr);
7043 			}
7044 			break;
7045 		case FLASH_VPD_1_REGION:
7046 			if (ha->flags & FUNCTION_1 &&
7047 			    !(CFG_IST(ha, CFG_CTRL_8021))) {
7048 				ha->flash_vpd_addr = faddr;
7049 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
7050 				    "\n", ha->instance, faddr);
7051 			}
7052 			break;
7053 		case FLASH_NVRAM_1_REGION:
7054 			if (ha->flags & FUNCTION_1) {
7055 				ha->flash_nvram_addr = faddr;
7056 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
7057 				    "%xh\n", ha->instance, faddr);
7058 			}
7059 			break;
7060 		case FLASH_DESC_TABLE_REGION:
7061 			if (!(CFG_IST(ha, CFG_CTRL_8021))) {
7062 				ha->flash_desc_addr = faddr;
7063 				QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr="
7064 				    "%xh\n", ha->instance, faddr);
7065 			}
7066 			break;
7067 		case FLASH_ERROR_LOG_0_REGION:
7068 			if (!(ha->flags & FUNCTION_1)) {
7069 				ha->flash_errlog_start = faddr;
7070 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
7071 				    "%xh\n", ha->instance, faddr);
7072 			}
7073 			break;
7074 		case FLASH_ERROR_LOG_1_REGION:
7075 			if (ha->flags & FUNCTION_1) {
7076 				ha->flash_errlog_start = faddr;
7077 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
7078 				    "%xh\n", ha->instance, faddr);
7079 			}
7080 			break;
7081 		default:
7082 			break;
7083 		}
7084 	}
7085 	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7086 
7087 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7088 }
7089 
7090 /*
7091  * ql_flash_nvram_defaults
7092  *	Flash default addresses.
7093  *
7094  * Input:
7095  *	ha:		adapter state pointer.
7096  *
7097  * Returns:
7098  *	ql local function return status code.
7099  *
7100  * Context:
7101  *	Kernel context.
7102  */
static void
ql_flash_nvram_defaults(ql_adapter_state_t *ha)
{
	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Select the per-chip default flash addresses; function 1 uses
	 * its own NVRAM/VPD/error-log regions on multi-function parts.
	 */
	if (ha->flags & FUNCTION_1) {
		if (CFG_IST(ha, CFG_CTRL_2300)) {
			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021: no flash data window or error log region. */
			ha->flash_data_addr = 0;
			ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
			ha->flash_errlog_start = 0;
			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
		} else {
			/* Unrecognized device: leave addresses unset. */
			EL(ha, "unassigned flash fn1 addr: %x\n",
			    ha->device_id);
		}
	} else {
		if (CFG_IST(ha, CFG_CTRL_2200)) {
			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2300) ||
		    (CFG_IST(ha, CFG_CTRL_6322))) {
			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021: no flash data window or error log region. */
			ha->flash_data_addr = 0;
			ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
			ha->flash_errlog_start = 0;
			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
		} else {
			/* Unrecognized device: leave addresses unset. */
			EL(ha, "unassigned flash fn0 addr: %x\n",
			    ha->device_id);
		}
	}
	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7193 
7194 /*
7195  * ql_get_sfp
7196  *	Returns sfp data to sdmapi caller
7197  *
7198  * Input:
7199  *	ha:	adapter state pointer.
7200  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7201  *	mode:	flags.
7202  *
7203  * Returns:
7204  *	None, request status indicated in cmd->Status.
7205  *
7206  * Context:
7207  *	Kernel context.
7208  */
7209 static void
7210 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7211 {
7212 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7213 
7214 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
7215 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7216 		EL(ha, "failed, invalid request for HBA\n");
7217 		return;
7218 	}
7219 
7220 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7221 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7222 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7223 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7224 		    cmd->ResponseLen);
7225 		return;
7226 	}
7227 
7228 	/* Dump SFP data in user buffer */
7229 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7230 	    mode)) != 0) {
7231 		cmd->Status = EXT_STATUS_COPY_ERR;
7232 		EL(ha, "failed, copy error\n");
7233 	} else {
7234 		cmd->Status = EXT_STATUS_OK;
7235 	}
7236 
7237 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7238 }
7239 
7240 /*
7241  * ql_dump_sfp
7242  *	Dumps SFP.
7243  *
7244  * Input:
7245  *	ha:	adapter state pointer.
7246  *	bp:	buffer address.
7247  *	mode:	flags
7248  *
7249  * Returns:
7250  *
7251  * Context:
7252  *	Kernel context.
7253  */
static int
ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
{
	dma_mem_t	mem;	/* 64-byte DMA bounce buffer */
	uint32_t	cnt;
	int		rval2, rval = 0;
	uint32_t	dxfer;	/* bytes actually copied to caller */

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get memory for SFP. */

	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
		return (ENOMEM);
	}

	/*
	 * Read the SFP in mem.size chunks.  The first 256 bytes are
	 * read from device address 0xA0, the remainder from 0xA2; the
	 * offset within each device wraps at 256 (cnt & 0xff).
	 */
	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
		rval2 = ql_read_sfp(ha, &mem,
		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
		    (uint16_t)(cnt & 0xff));
		if (rval2 != QL_SUCCESS) {
			EL(ha, "failed, read_sfp=%xh\n", rval2);
			rval = EFAULT;
			break;
		}

		/* copy the data back */
		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
		    mode)) != mem.size) {
			/* ddi copy error */
			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
			rval = EFAULT;
			break;
		}

		/* adjust the buffer pointer */
		bp = (caddr_t)bp + mem.size;
	}

	ql_free_phys(ha, &mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7301 
7302 /*
7303  * ql_port_param
7304  *	Retrieves or sets the firmware port speed settings
7305  *
7306  * Input:
7307  *	ha:	adapter state pointer.
7308  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7309  *	mode:	flags.
7310  *
7311  * Returns:
7312  *	None, request status indicated in cmd->Status.
7313  *
7314  * Context:
7315  *	Kernel context.
7316  *
7317  */
7318 static void
7319 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7320 {
7321 	uint8_t			*name;
7322 	ql_tgt_t		*tq;
7323 	EXT_PORT_PARAM		port_param = {0};
7324 	uint32_t		rval = QL_SUCCESS;
7325 	uint32_t		idma_rate;
7326 
7327 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7328 
7329 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7330 		EL(ha, "invalid request for this HBA\n");
7331 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7332 		cmd->ResponseLen = 0;
7333 		return;
7334 	}
7335 
7336 	if (LOOP_NOT_READY(ha)) {
7337 		EL(ha, "failed, loop not ready\n");
7338 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7339 		cmd->ResponseLen = 0;
7340 		return;
7341 	}
7342 
7343 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7344 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7345 		EL(ha, "failed, ddi_copyin\n");
7346 		cmd->Status = EXT_STATUS_COPY_ERR;
7347 		cmd->ResponseLen = 0;
7348 		return;
7349 	}
7350 
7351 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7352 		EL(ha, "Unsupported dest lookup type: %xh\n",
7353 		    port_param.FCScsiAddr.DestType);
7354 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7355 		cmd->ResponseLen = 0;
7356 		return;
7357 	}
7358 
7359 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7360 
7361 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7362 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
7363 	    name[5], name[6], name[7]);
7364 
7365 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7366 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7367 		EL(ha, "failed, fc_port not found\n");
7368 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7369 		cmd->ResponseLen = 0;
7370 		return;
7371 	}
7372 
7373 	cmd->Status = EXT_STATUS_OK;
7374 	cmd->DetailStatus = EXT_STATUS_OK;
7375 
7376 	switch (port_param.Mode) {
7377 	case EXT_IIDMA_MODE_GET:
7378 		/*
7379 		 * Report the firmware's port rate for the wwpn
7380 		 */
7381 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7382 		    port_param.Mode);
7383 
7384 		if (rval != QL_SUCCESS) {
7385 			EL(ha, "iidma get failed: %xh\n", rval);
7386 			cmd->Status = EXT_STATUS_MAILBOX;
7387 			cmd->DetailStatus = rval;
7388 			cmd->ResponseLen = 0;
7389 		} else {
7390 			switch (idma_rate) {
7391 			case IIDMA_RATE_1GB:
7392 				port_param.Speed =
7393 				    EXT_DEF_PORTSPEED_1GBIT;
7394 				break;
7395 			case IIDMA_RATE_2GB:
7396 				port_param.Speed =
7397 				    EXT_DEF_PORTSPEED_2GBIT;
7398 				break;
7399 			case IIDMA_RATE_4GB:
7400 				port_param.Speed =
7401 				    EXT_DEF_PORTSPEED_4GBIT;
7402 				break;
7403 			case IIDMA_RATE_8GB:
7404 				port_param.Speed =
7405 				    EXT_DEF_PORTSPEED_8GBIT;
7406 				break;
7407 			case IIDMA_RATE_10GB:
7408 				port_param.Speed =
7409 				    EXT_DEF_PORTSPEED_10GBIT;
7410 				break;
7411 			default:
7412 				port_param.Speed =
7413 				    EXT_DEF_PORTSPEED_UNKNOWN;
7414 				EL(ha, "failed, Port speed rate=%xh\n",
7415 				    idma_rate);
7416 				break;
7417 			}
7418 
7419 			/* Copy back the data */
7420 			rval = ddi_copyout((void *)&port_param,
7421 			    (void *)(uintptr_t)cmd->ResponseAdr,
7422 			    sizeof (EXT_PORT_PARAM), mode);
7423 
7424 			if (rval != 0) {
7425 				cmd->Status = EXT_STATUS_COPY_ERR;
7426 				cmd->ResponseLen = 0;
7427 				EL(ha, "failed, ddi_copyout\n");
7428 			} else {
7429 				cmd->ResponseLen = (uint32_t)
7430 				    sizeof (EXT_PORT_PARAM);
7431 			}
7432 		}
7433 		break;
7434 
7435 	case EXT_IIDMA_MODE_SET:
7436 		/*
7437 		 * Set the firmware's port rate for the wwpn
7438 		 */
7439 		switch (port_param.Speed) {
7440 		case EXT_DEF_PORTSPEED_1GBIT:
7441 			idma_rate = IIDMA_RATE_1GB;
7442 			break;
7443 		case EXT_DEF_PORTSPEED_2GBIT:
7444 			idma_rate = IIDMA_RATE_2GB;
7445 			break;
7446 		case EXT_DEF_PORTSPEED_4GBIT:
7447 			idma_rate = IIDMA_RATE_4GB;
7448 			break;
7449 		case EXT_DEF_PORTSPEED_8GBIT:
7450 			idma_rate = IIDMA_RATE_8GB;
7451 			break;
7452 		case EXT_DEF_PORTSPEED_10GBIT:
7453 			port_param.Speed = IIDMA_RATE_10GB;
7454 			break;
7455 		default:
7456 			EL(ha, "invalid set iidma rate: %x\n",
7457 			    port_param.Speed);
7458 			cmd->Status = EXT_STATUS_INVALID_PARAM;
7459 			cmd->ResponseLen = 0;
7460 			rval = QL_PARAMETER_ERROR;
7461 			break;
7462 		}
7463 
7464 		if (rval == QL_SUCCESS) {
7465 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7466 			    port_param.Mode);
7467 			if (rval != QL_SUCCESS) {
7468 				EL(ha, "iidma set failed: %xh\n", rval);
7469 				cmd->Status = EXT_STATUS_MAILBOX;
7470 				cmd->DetailStatus = rval;
7471 				cmd->ResponseLen = 0;
7472 			}
7473 		}
7474 		break;
7475 	default:
7476 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7477 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7478 		cmd->ResponseLen = 0;
7479 		cmd->DetailStatus = 0;
7480 		break;
7481 	}
7482 
7483 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7484 }
7485 
7486 /*
7487  * ql_get_fwexttrace
7488  *	Dumps f/w extended trace buffer
7489  *
7490  * Input:
7491  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
7493  *	mode:	flags
7494  *
7495  * Returns:
7496  *
7497  * Context:
7498  *	Kernel context.
7499  */
7500 /* ARGSUSED */
7501 static void
7502 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7503 {
7504 	int	rval;
7505 	caddr_t	payload;
7506 
7507 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7508 
7509 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7510 		EL(ha, "invalid request for this HBA\n");
7511 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7512 		cmd->ResponseLen = 0;
7513 		return;
7514 	}
7515 
7516 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7517 	    (ha->fwexttracebuf.bp == NULL)) {
7518 		EL(ha, "f/w extended trace is not enabled\n");
7519 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7520 		cmd->ResponseLen = 0;
7521 		return;
7522 	}
7523 
7524 	if (cmd->ResponseLen < FWEXTSIZE) {
7525 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7526 		cmd->DetailStatus = FWEXTSIZE;
7527 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7528 		    cmd->ResponseLen, FWEXTSIZE);
7529 		cmd->ResponseLen = 0;
7530 		return;
7531 	}
7532 
7533 	/* Time Stamp */
7534 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7535 	if (rval != QL_SUCCESS) {
7536 		EL(ha, "f/w extended trace insert"
7537 		    "time stamp failed: %xh\n", rval);
7538 		cmd->Status = EXT_STATUS_ERR;
7539 		cmd->ResponseLen = 0;
7540 		return;
7541 	}
7542 
7543 	/* Disable Tracing */
7544 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7545 	if (rval != QL_SUCCESS) {
7546 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7547 		cmd->Status = EXT_STATUS_ERR;
7548 		cmd->ResponseLen = 0;
7549 		return;
7550 	}
7551 
7552 	/* Allocate payload buffer */
7553 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7554 	if (payload == NULL) {
7555 		EL(ha, "failed, kmem_zalloc\n");
7556 		cmd->Status = EXT_STATUS_NO_MEMORY;
7557 		cmd->ResponseLen = 0;
7558 		return;
7559 	}
7560 
7561 	/* Sync DMA buffer. */
7562 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7563 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7564 
7565 	/* Copy trace buffer data. */
7566 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7567 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7568 	    DDI_DEV_AUTOINCR);
7569 
7570 	/* Send payload to application. */
7571 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7572 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7573 		EL(ha, "failed, send_buffer_data\n");
7574 		cmd->Status = EXT_STATUS_COPY_ERR;
7575 		cmd->ResponseLen = 0;
7576 	} else {
7577 		cmd->Status = EXT_STATUS_OK;
7578 	}
7579 
7580 	kmem_free(payload, FWEXTSIZE);
7581 
7582 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7583 }
7584 
7585 /*
7586  * ql_get_fwfcetrace
7587  *	Dumps f/w fibre channel event trace buffer
7588  *
7589  * Input:
7590  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
7592  *	mode:	flags
7593  *
7594  * Returns:
7595  *
7596  * Context:
7597  *	Kernel context.
7598  */
7599 /* ARGSUSED */
7600 static void
7601 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7602 {
7603 	int	rval;
7604 	caddr_t	payload;
7605 
7606 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7607 
7608 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7609 		EL(ha, "invalid request for this HBA\n");
7610 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7611 		cmd->ResponseLen = 0;
7612 		return;
7613 	}
7614 
7615 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7616 	    (ha->fwfcetracebuf.bp == NULL)) {
7617 		EL(ha, "f/w FCE trace is not enabled\n");
7618 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7619 		cmd->ResponseLen = 0;
7620 		return;
7621 	}
7622 
7623 	if (cmd->ResponseLen < FWFCESIZE) {
7624 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7625 		cmd->DetailStatus = FWFCESIZE;
7626 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7627 		    cmd->ResponseLen, FWFCESIZE);
7628 		cmd->ResponseLen = 0;
7629 		return;
7630 	}
7631 
7632 	/* Disable Tracing */
7633 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7634 	if (rval != QL_SUCCESS) {
7635 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7636 		cmd->Status = EXT_STATUS_ERR;
7637 		cmd->ResponseLen = 0;
7638 		return;
7639 	}
7640 
7641 	/* Allocate payload buffer */
7642 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7643 	if (payload == NULL) {
7644 		EL(ha, "failed, kmem_zalloc\n");
7645 		cmd->Status = EXT_STATUS_NO_MEMORY;
7646 		cmd->ResponseLen = 0;
7647 		return;
7648 	}
7649 
7650 	/* Sync DMA buffer. */
7651 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7652 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7653 
7654 	/* Copy trace buffer data. */
7655 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7656 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7657 	    DDI_DEV_AUTOINCR);
7658 
7659 	/* Send payload to application. */
7660 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7661 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7662 		EL(ha, "failed, send_buffer_data\n");
7663 		cmd->Status = EXT_STATUS_COPY_ERR;
7664 		cmd->ResponseLen = 0;
7665 	} else {
7666 		cmd->Status = EXT_STATUS_OK;
7667 	}
7668 
7669 	kmem_free(payload, FWFCESIZE);
7670 
7671 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7672 }
7673 
7674 /*
7675  * ql_get_pci_data
7676  *	Retrieves pci config space data
7677  *
7678  * Input:
7679  *	ha:	adapter state pointer.
7680  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7681  *	mode:	flags.
7682  *
7683  * Returns:
7684  *	None, request status indicated in cmd->Status.
7685  *
7686  * Context:
7687  *	Kernel context.
7688  *
7689  */
7690 static void
7691 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7692 {
7693 	uint8_t		cap_ptr;
7694 	uint8_t		cap_id;
7695 	uint32_t	buf_size = 256;
7696 
7697 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7698 
7699 	/*
7700 	 * First check the "Capabilities List" bit of the status register.
7701 	 */
7702 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7703 		/*
7704 		 * Now get the capability pointer
7705 		 */
7706 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7707 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7708 			/*
7709 			 * Check for the pcie capability.
7710 			 */
7711 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7712 			if (cap_id == PCI_CAP_ID_PCI_E) {
7713 				buf_size = 4096;
7714 				break;
7715 			}
7716 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7717 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7718 		}
7719 	}
7720 
7721 	if (cmd->ResponseLen < buf_size) {
7722 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7723 		cmd->DetailStatus = buf_size;
7724 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7725 		    cmd->ResponseLen);
7726 		return;
7727 	}
7728 
7729 	/* Dump PCI config data. */
7730 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7731 	    buf_size, mode)) != 0) {
7732 		cmd->Status = EXT_STATUS_COPY_ERR;
7733 		cmd->DetailStatus = 0;
7734 		EL(ha, "failed, copy err pci_dump\n");
7735 	} else {
7736 		cmd->Status = EXT_STATUS_OK;
7737 		cmd->DetailStatus = buf_size;
7738 	}
7739 
7740 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7741 }
7742 
7743 /*
7744  * ql_pci_dump
7745  *	Dumps PCI config data to application buffer.
7746  *
7747  * Input:
7748  *	ha = adapter state pointer.
7749  *	bp = user buffer address.
7750  *
7751  * Returns:
7752  *
7753  * Context:
7754  *	Kernel context.
7755  */
7756 int
7757 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7758 {
7759 	uint32_t	pci_os;
7760 	uint32_t	*ptr32, *org_ptr32;
7761 
7762 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7763 
7764 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7765 	if (ptr32 == NULL) {
7766 		EL(ha, "failed kmem_zalloc\n");
7767 		return (ENOMEM);
7768 	}
7769 
7770 	/* store the initial value of ptr32 */
7771 	org_ptr32 = ptr32;
7772 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7773 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7774 		LITTLE_ENDIAN_32(ptr32);
7775 		ptr32++;
7776 	}
7777 
7778 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7779 	    0) {
7780 		EL(ha, "failed ddi_copyout\n");
7781 		kmem_free(org_ptr32, pci_size);
7782 		return (EFAULT);
7783 	}
7784 
7785 	QL_DUMP_9(org_ptr32, 8, pci_size);
7786 
7787 	kmem_free(org_ptr32, pci_size);
7788 
7789 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7790 
7791 	return (0);
7792 }
7793 
7794 /*
7795  * ql_menlo_reset
7796  *	Reset Menlo
7797  *
7798  * Input:
7799  *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
7801  *	mode:	flags
7802  *
7803  * Returns:
7804  *
7805  * Context:
7806  *	Kernel context.
7807  */
static void
ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_MENLO_RESET	rst;
	ql_mbx_data_t	mr;
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Menlo reset only applies to Menlo-based HBAs. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/*  Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_RESET));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get reset request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Wait for I/O to stop and daemon to stall. */
	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Issue the reset mailbox command; mr.mb[1] carries a
	 * firmware substatus on completion.
	 */
	rval = ql_reset_menlo(ha, &mr, rst.Flags);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, status=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else if (mr.mb[1] != 0) {
		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = mr.mb[1];
		cmd->ResponseLen = 0;
	}

	/* Always resume I/O, whether the reset succeeded or not. */
	ql_restart_hba(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7874 
7875 /*
7876  * ql_menlo_get_fw_version
7877  *	Get Menlo firmware version.
7878  *
7879  * Input:
7880  *	ha:	adapter state pointer.
7881  *	bp:	buffer address.
7882  *	mode:	flags
7883  *
7884  * Returns:
7885  *
7886  * Context:
7887  *	Kernel context.
7888  */
static void
ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int				rval;
	ql_mbx_iocb_t			*pkt;
	EXT_MENLO_GET_FW_VERSION	ver = {0};

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Firmware version query only applies to Menlo-based HBAs. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
		    sizeof (EXT_MENLO_GET_FW_VERSION));
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate packet. */
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Build a VERIFY IOCB that only queries the version
	 * (VMF_DO_NOT_UPDATE_FW suppresses any firmware update).
	 */
	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
	pkt->mvfy.entry_count = 1;
	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Convert the completed IOCB fields back to host byte order. */
	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
	ver.FwVersion = LE_32(pkt->mvfy.fw_version);

	/* 0x3c masks the entry-status error bits of the IOCB. */
	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
	    pkt->mvfy.options_status != CS_COMPLETE) {
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&ver,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
	}

	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7956 
7957 /*
7958  * ql_menlo_update_fw
7959  *	Get Menlo update firmware.
7960  *
7961  * Input:
7962  *	ha:	adapter state pointer.
7963  *	bp:	buffer address.
7964  *	mode:	flags
7965  *
7966  * Returns:
7967  *
7968  * Context:
7969  *	Kernel context.
7970  */
7971 static void
7972 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7973 {
7974 	ql_mbx_iocb_t		*pkt;
7975 	dma_mem_t		*dma_mem;
7976 	EXT_MENLO_UPDATE_FW	fw;
7977 	uint32_t		*ptr32;
7978 	int			rval;
7979 
7980 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7981 
7982 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7983 		EL(ha, "failed, invalid request for HBA\n");
7984 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7985 		cmd->ResponseLen = 0;
7986 		return;
7987 	}
7988 
7989 	/*
7990 	 * TODO: only vp_index 0 can do this (?)
7991 	 */
7992 
7993 	/*  Verify the size of request structure. */
7994 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
7995 		/* Return error */
7996 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7997 		    sizeof (EXT_MENLO_UPDATE_FW));
7998 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7999 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8000 		cmd->ResponseLen = 0;
8001 		return;
8002 	}
8003 
8004 	/* Get update fw request. */
8005 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
8006 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
8007 		EL(ha, "failed, ddi_copyin\n");
8008 		cmd->Status = EXT_STATUS_COPY_ERR;
8009 		cmd->ResponseLen = 0;
8010 		return;
8011 	}
8012 
8013 	/* Wait for I/O to stop and daemon to stall. */
8014 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8015 		EL(ha, "ql_stall_driver failed\n");
8016 		ql_restart_hba(ha);
8017 		cmd->Status = EXT_STATUS_BUSY;
8018 		cmd->ResponseLen = 0;
8019 		return;
8020 	}
8021 
8022 	/* Allocate packet. */
8023 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
8024 	if (dma_mem == NULL) {
8025 		EL(ha, "failed, kmem_zalloc\n");
8026 		cmd->Status = EXT_STATUS_NO_MEMORY;
8027 		cmd->ResponseLen = 0;
8028 		return;
8029 	}
8030 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8031 	if (pkt == NULL) {
8032 		EL(ha, "failed, kmem_zalloc\n");
8033 		kmem_free(dma_mem, sizeof (dma_mem_t));
8034 		ql_restart_hba(ha);
8035 		cmd->Status = EXT_STATUS_NO_MEMORY;
8036 		cmd->ResponseLen = 0;
8037 		return;
8038 	}
8039 
8040 	/* Get DMA memory for the IOCB */
8041 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
8042 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8043 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8044 		    "alloc failed", QL_NAME, ha->instance);
8045 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8046 		kmem_free(dma_mem, sizeof (dma_mem_t));
8047 		ql_restart_hba(ha);
8048 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8049 		cmd->ResponseLen = 0;
8050 		return;
8051 	}
8052 
8053 	/* Get firmware data. */
8054 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
8055 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
8056 		EL(ha, "failed, get_buffer_data\n");
8057 		ql_free_dma_resource(ha, dma_mem);
8058 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8059 		kmem_free(dma_mem, sizeof (dma_mem_t));
8060 		ql_restart_hba(ha);
8061 		cmd->Status = EXT_STATUS_COPY_ERR;
8062 		cmd->ResponseLen = 0;
8063 		return;
8064 	}
8065 
8066 	/* Sync DMA buffer. */
8067 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8068 	    DDI_DMA_SYNC_FORDEV);
8069 
8070 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8071 	pkt->mvfy.entry_count = 1;
8072 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
8073 	ptr32 = dma_mem->bp;
8074 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
8075 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
8076 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
8077 	pkt->mvfy.dseg_count = LE_16(1);
8078 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
8079 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8080 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
8081 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8082 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
8083 
8084 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8085 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8086 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8087 
8088 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8089 	    pkt->mvfy.options_status != CS_COMPLETE) {
8090 		/* Command error */
8091 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8092 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8093 		    pkt->mvfy.failure_code);
8094 		cmd->Status = EXT_STATUS_ERR;
8095 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8096 		    QL_FUNCTION_FAILED;
8097 		cmd->ResponseLen = 0;
8098 	}
8099 
8100 	ql_free_dma_resource(ha, dma_mem);
8101 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8102 	kmem_free(dma_mem, sizeof (dma_mem_t));
8103 	ql_restart_hba(ha);
8104 
8105 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8106 }
8107 
8108 /*
8109  * ql_menlo_manage_info
8110  *	Get Menlo manage info.
8111  *
8112  * Input:
8113  *	ha:	adapter state pointer.
8114  *	bp:	buffer address.
8115  *	mode:	flags
8116  *
8117  * Returns:
8118  *
8119  * Context:
8120  *	Kernel context.
8121  */
8122 static void
8123 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8124 {
8125 	ql_mbx_iocb_t		*pkt;
8126 	dma_mem_t		*dma_mem = NULL;
8127 	EXT_MENLO_MANAGE_INFO	info;
8128 	int			rval;
8129 
8130 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8131 
8132 
8133 	/* The call is only supported for Schultz right now */
8134 	if (CFG_IST(ha, CFG_CTRL_8081)) {
8135 		ql_get_xgmac_statistics(ha, cmd, mode);
8136 		QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n",
8137 		    ha->instance);
8138 		return;
8139 	}
8140 
8141 	if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) {
8142 		EL(ha, "failed, invalid request for HBA\n");
8143 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8144 		cmd->ResponseLen = 0;
8145 		return;
8146 	}
8147 
8148 	/*  Verify the size of request structure. */
8149 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8150 		/* Return error */
8151 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8152 		    sizeof (EXT_MENLO_MANAGE_INFO));
8153 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8154 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8155 		cmd->ResponseLen = 0;
8156 		return;
8157 	}
8158 
8159 	/* Get manage info request. */
8160 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8161 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8162 		EL(ha, "failed, ddi_copyin\n");
8163 		cmd->Status = EXT_STATUS_COPY_ERR;
8164 		cmd->ResponseLen = 0;
8165 		return;
8166 	}
8167 
8168 	/* Allocate packet. */
8169 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8170 	if (pkt == NULL) {
8171 		EL(ha, "failed, kmem_zalloc\n");
8172 		ql_restart_driver(ha);
8173 		cmd->Status = EXT_STATUS_NO_MEMORY;
8174 		cmd->ResponseLen = 0;
8175 		return;
8176 	}
8177 
8178 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
8179 	pkt->mdata.entry_count = 1;
8180 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
8181 
8182 	/* Get DMA memory for the IOCB */
8183 	if (info.Operation == MENLO_OP_READ_MEM ||
8184 	    info.Operation == MENLO_OP_WRITE_MEM) {
8185 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
8186 		pkt->mdata.parameter_1 =
8187 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8188 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8189 		    KM_SLEEP);
8190 		if (dma_mem == NULL) {
8191 			EL(ha, "failed, kmem_zalloc\n");
8192 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8193 			cmd->Status = EXT_STATUS_NO_MEMORY;
8194 			cmd->ResponseLen = 0;
8195 			return;
8196 		}
8197 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8198 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8199 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8200 			    "alloc failed", QL_NAME, ha->instance);
8201 			kmem_free(dma_mem, sizeof (dma_mem_t));
8202 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8203 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8204 			cmd->ResponseLen = 0;
8205 			return;
8206 		}
8207 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8208 			/* Get data. */
8209 			if (ql_get_buffer_data(
8210 			    (caddr_t)(uintptr_t)info.pDataBytes,
8211 			    dma_mem->bp, info.TotalByteCount, mode) !=
8212 			    info.TotalByteCount) {
8213 				EL(ha, "failed, get_buffer_data\n");
8214 				ql_free_dma_resource(ha, dma_mem);
8215 				kmem_free(dma_mem, sizeof (dma_mem_t));
8216 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8217 				cmd->Status = EXT_STATUS_COPY_ERR;
8218 				cmd->ResponseLen = 0;
8219 				return;
8220 			}
8221 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8222 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8223 		}
8224 		pkt->mdata.dseg_count = LE_16(1);
8225 		pkt->mdata.dseg_0_address[0] = (uint32_t)
8226 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8227 		pkt->mdata.dseg_0_address[1] = (uint32_t)
8228 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8229 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8230 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8231 		pkt->mdata.parameter_1 =
8232 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8233 		pkt->mdata.parameter_2 =
8234 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8235 		pkt->mdata.parameter_3 =
8236 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8237 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8238 		pkt->mdata.parameter_1 =
8239 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8240 		pkt->mdata.parameter_2 =
8241 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8242 	}
8243 
8244 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8245 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8246 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8247 
8248 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8249 	    pkt->mdata.options_status != CS_COMPLETE) {
8250 		/* Command error */
8251 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8252 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8253 		    pkt->mdata.failure_code);
8254 		cmd->Status = EXT_STATUS_ERR;
8255 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8256 		    QL_FUNCTION_FAILED;
8257 		cmd->ResponseLen = 0;
8258 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8259 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8260 		    DDI_DMA_SYNC_FORKERNEL);
8261 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8262 		    dma_mem->bp, info.TotalByteCount, mode) !=
8263 		    info.TotalByteCount) {
8264 			cmd->Status = EXT_STATUS_COPY_ERR;
8265 			cmd->ResponseLen = 0;
8266 		}
8267 	}
8268 
8269 	ql_free_dma_resource(ha, dma_mem);
8270 	kmem_free(dma_mem, sizeof (dma_mem_t));
8271 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8272 
8273 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8274 }
8275 
8276 /*
8277  * ql_suspend_hba
8278  *	Suspends all adapter ports.
8279  *
8280  * Input:
8281  *	ha:		adapter state pointer.
8282  *	options:	BIT_0 --> leave driver stalled on exit if
8283  *				  failed.
8284  *
8285  * Returns:
8286  *	ql local function return status code.
8287  *
8288  * Context:
8289  *	Kernel context.
8290  */
8291 static int
8292 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8293 {
8294 	ql_adapter_state_t	*ha2;
8295 	ql_link_t		*link;
8296 	int			rval = QL_SUCCESS;
8297 
8298 	/* Quiesce I/O on all adapter ports */
8299 	for (link = ql_hba.first; link != NULL; link = link->next) {
8300 		ha2 = link->base_address;
8301 
8302 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8303 			continue;
8304 		}
8305 
8306 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8307 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8308 			break;
8309 		}
8310 	}
8311 
8312 	return (rval);
8313 }
8314 
8315 /*
8316  * ql_restart_hba
8317  *	Restarts adapter.
8318  *
8319  * Input:
8320  *	ha:	adapter state pointer.
8321  *
8322  * Context:
8323  *	Kernel context.
8324  */
8325 static void
8326 ql_restart_hba(ql_adapter_state_t *ha)
8327 {
8328 	ql_adapter_state_t	*ha2;
8329 	ql_link_t		*link;
8330 
8331 	/* Resume I/O on all adapter ports */
8332 	for (link = ql_hba.first; link != NULL; link = link->next) {
8333 		ha2 = link->base_address;
8334 
8335 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8336 			continue;
8337 		}
8338 
8339 		ql_restart_driver(ha2);
8340 	}
8341 }
8342 
8343 /*
8344  * ql_get_vp_cnt_id
 *	Retrieves the virtual port count and their indexes/paths.
8346  *
8347  * Input:
8348  *	ha:	adapter state pointer.
8349  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8350  *	mode:	flags.
8351  *
8352  * Returns:
8353  *	None, request status indicated in cmd->Status.
8354  *
8355  * Context:
8356  *	Kernel context.
8357  *
8358  */
8359 static void
8360 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8361 {
8362 	ql_adapter_state_t	*vha;
8363 	PEXT_VPORT_ID_CNT	ptmp_vp;
8364 	int			id = 0;
8365 	int			rval;
8366 	char			name[MAXPATHLEN];
8367 
8368 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8369 
8370 	/*
8371 	 * To be backward compatible with older API
8372 	 * check for the size of old EXT_VPORT_ID_CNT
8373 	 */
8374 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8375 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8376 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8377 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8378 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8379 		    cmd->ResponseLen);
8380 		cmd->ResponseLen = 0;
8381 		return;
8382 	}
8383 
8384 	ptmp_vp = (EXT_VPORT_ID_CNT *)
8385 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8386 	if (ptmp_vp == NULL) {
8387 		EL(ha, "failed, kmem_zalloc\n");
8388 		cmd->ResponseLen = 0;
8389 		return;
8390 	}
8391 	vha = ha->vp_next;
8392 	while (vha != NULL) {
8393 		ptmp_vp->VpCnt++;
8394 		ptmp_vp->VpId[id] = vha->vp_index;
8395 		(void) ddi_pathname(vha->dip, name);
8396 		(void) strcpy((char *)ptmp_vp->vp_path[id], name);
8397 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8398 		id++;
8399 		vha = vha->vp_next;
8400 	}
8401 	rval = ddi_copyout((void *)ptmp_vp,
8402 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8403 	    cmd->ResponseLen, mode);
8404 	if (rval != 0) {
8405 		cmd->Status = EXT_STATUS_COPY_ERR;
8406 		cmd->ResponseLen = 0;
8407 		EL(ha, "failed, ddi_copyout\n");
8408 	} else {
8409 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8410 		QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8411 		    ha->instance, ptmp_vp->VpCnt);
8412 	}
8413 
8414 }
8415 
8416 /*
8417  * ql_vp_ioctl
8418  *	Performs all EXT_CC_VPORT_CMD functions.
8419  *
8420  * Input:
8421  *	ha:	adapter state pointer.
8422  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8423  *	mode:	flags.
8424  *
8425  * Returns:
8426  *	None, request status indicated in cmd->Status.
8427  *
8428  * Context:
8429  *	Kernel context.
8430  */
8431 static void
8432 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8433 {
8434 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8435 	    cmd->SubCode);
8436 
8437 	/* case off on command subcode */
8438 	switch (cmd->SubCode) {
8439 	case EXT_VF_SC_VPORT_GETINFO:
8440 		ql_qry_vport(ha, cmd, mode);
8441 		break;
8442 	default:
8443 		/* function not supported. */
8444 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8445 		EL(ha, "failed, Unsupported Subcode=%xh\n",
8446 		    cmd->SubCode);
8447 		break;
8448 	}
8449 
8450 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8451 }
8452 
8453 /*
8454  * ql_qry_vport
8455  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8456  *
8457  * Input:
 *	vha:	virtual adapter state pointer.
8459  *	cmd:	EXT_IOCTL cmd struct pointer.
8460  *	mode:	flags.
8461  *
8462  * Returns:
8463  *	None, request status indicated in cmd->Status.
8464  *
8465  * Context:
8466  *	Kernel context.
8467  */
8468 static void
8469 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8470 {
8471 	ql_adapter_state_t	*tmp_vha;
8472 	EXT_VPORT_INFO		tmp_vport = {0};
8473 	int			max_vport;
8474 
8475 	QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8476 
8477 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8478 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8479 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8480 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8481 		    cmd->ResponseLen);
8482 		cmd->ResponseLen = 0;
8483 		return;
8484 	}
8485 
8486 	/* Fill in the vport information. */
8487 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8488 	    EXT_DEF_WWN_NAME_SIZE);
8489 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8490 	    EXT_DEF_WWN_NAME_SIZE);
8491 	tmp_vport.state = vha->state;
8492 
8493 	tmp_vha = vha->pha->vp_next;
8494 	while (tmp_vha != NULL) {
8495 		tmp_vport.used++;
8496 		tmp_vha = tmp_vha->vp_next;
8497 	}
8498 
8499 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8500 	    MAX_25_VIRTUAL_PORTS);
8501 	if (max_vport > tmp_vport.used) {
8502 		tmp_vport.free = max_vport - tmp_vport.used;
8503 	}
8504 
8505 	if (ddi_copyout((void *)&tmp_vport,
8506 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8507 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
8508 		cmd->Status = EXT_STATUS_COPY_ERR;
8509 		cmd->ResponseLen = 0;
8510 		EL(vha, "failed, ddi_copyout\n");
8511 	} else {
8512 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8513 		QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8514 	}
8515 }
8516 
8517 /*
8518  * ql_access_flash
8519  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
8520  *
8521  * Input:
 *	ha:	adapter state pointer.
8523  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8524  *	mode:	flags.
8525  *
8526  * Returns:
8527  *	None, request status indicated in cmd->Status.
8528  *
8529  * Context:
8530  *	Kernel context.
8531  */
8532 static void
8533 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8534 {
8535 	int	rval;
8536 
8537 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8538 
8539 	switch (cmd->SubCode) {
8540 	case EXT_SC_FLASH_READ:
8541 		if ((rval = ql_flash_fcode_dump(ha,
8542 		    (void *)(uintptr_t)(cmd->ResponseAdr),
8543 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8544 			cmd->Status = EXT_STATUS_COPY_ERR;
8545 			cmd->ResponseLen = 0;
8546 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
8547 		}
8548 		break;
8549 	case EXT_SC_FLASH_WRITE:
8550 		if ((rval = ql_r_m_w_flash(ha,
8551 		    (void *)(uintptr_t)(cmd->RequestAdr),
8552 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8553 		    QL_SUCCESS) {
8554 			cmd->Status = EXT_STATUS_COPY_ERR;
8555 			cmd->ResponseLen = 0;
8556 			EL(ha, "r_m_w_flash status=%xh\n", rval);
8557 		} else {
8558 			/* Reset caches on all adapter instances. */
8559 			ql_update_flash_caches(ha);
8560 		}
8561 		break;
8562 	default:
8563 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8564 		cmd->Status = EXT_STATUS_ERR;
8565 		cmd->ResponseLen = 0;
8566 		break;
8567 	}
8568 
8569 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8570 }
8571 
8572 /*
8573  * ql_reset_cmd
8574  *	Performs all EXT_CC_RESET_FW_OS functions.
8575  *
8576  * Input:
8577  *	ha:	adapter state pointer.
8578  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8579  *
8580  * Returns:
8581  *	None, request status indicated in cmd->Status.
8582  *
8583  * Context:
8584  *	Kernel context.
8585  */
static void
ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	switch (cmd->SubCode) {
	case EXT_SC_RESET_FC_FW:
		/* FC firmware reset: flag the task daemon to abort the ISP. */
		EL(ha, "isp_abort_needed\n");
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		break;
	case EXT_SC_RESET_MPI_FW:
		/* MPI firmware reset is only valid on 81XX controllers. */
		if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
			EL(ha, "invalid request for HBA\n");
			cmd->Status = EXT_STATUS_INVALID_REQUEST;
			cmd->ResponseLen = 0;
		} else {
			/* Wait for I/O to stop and daemon to stall. */
			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
				EL(ha, "ql_suspend_hba failed\n");
				cmd->Status = EXT_STATUS_BUSY;
				cmd->ResponseLen = 0;
			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
				cmd->Status = EXT_STATUS_ERR;
				cmd->ResponseLen = 0;
			} else {
				uint8_t	timer;
				/*
				 * While the restart_mpi mailbox cmd may be
				 * done the MPI is not. Wait at least 6 sec. or
				 * exit if the loop comes up.
				 */
				for (timer = 6; timer; timer--) {
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						break;
					}
					/* Delay for 1 second. */
					ql_delay(ha, 1000000);
				}
			}
			/*
			 * Always resume the driver, even when the suspend
			 * or MPI restart above failed, so the adapter is
			 * not left stalled on exit.
			 */
			ql_restart_hba(ha);
		}
		break;
	default:
		/* Unrecognized reset subcommand. */
		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
8638 
8639 /*
8640  * ql_get_dcbx_parameters
8641  *	Get DCBX parameters.
8642  *
8643  * Input:
8644  *	ha:	adapter state pointer.
8645  *	cmd:	User space CT arguments pointer.
8646  *	mode:	flags.
8647  */
8648 static void
8649 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8650 {
8651 	uint8_t		*tmp_buf;
8652 	int		rval;
8653 
8654 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8655 
8656 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8657 		EL(ha, "invalid request for HBA\n");
8658 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8659 		cmd->ResponseLen = 0;
8660 		return;
8661 	}
8662 
8663 	/* Allocate memory for command. */
8664 	tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
8665 	if (tmp_buf == NULL) {
8666 		EL(ha, "failed, kmem_zalloc\n");
8667 		cmd->Status = EXT_STATUS_NO_MEMORY;
8668 		cmd->ResponseLen = 0;
8669 		return;
8670 	}
8671 	/* Send command */
8672 	rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
8673 	    (caddr_t)tmp_buf);
8674 	if (rval != QL_SUCCESS) {
8675 		/* error */
8676 		EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
8677 		kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8678 		cmd->Status = EXT_STATUS_ERR;
8679 		cmd->ResponseLen = 0;
8680 		return;
8681 	}
8682 
8683 	/* Copy the response */
8684 	if (ql_send_buffer_data((caddr_t)tmp_buf,
8685 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
8686 	    EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8687 		EL(ha, "failed, ddi_copyout\n");
8688 		cmd->Status = EXT_STATUS_COPY_ERR;
8689 		cmd->ResponseLen = 0;
8690 	} else {
8691 		cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8692 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8693 	}
8694 	kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8695 
8696 }
8697 
8698 /*
8699  * ql_qry_cna_port
8700  *	Performs EXT_SC_QUERY_CNA_PORT subfunction.
8701  *
8702  * Input:
8703  *	ha:	adapter state pointer.
8704  *	cmd:	EXT_IOCTL cmd struct pointer.
8705  *	mode:	flags.
8706  *
8707  * Returns:
8708  *	None, request status indicated in cmd->Status.
8709  *
8710  * Context:
8711  *	Kernel context.
8712  */
8713 static void
8714 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8715 {
8716 	EXT_CNA_PORT	cna_port = {0};
8717 
8718 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8719 
8720 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8721 		EL(ha, "invalid request for HBA\n");
8722 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8723 		cmd->ResponseLen = 0;
8724 		return;
8725 	}
8726 
8727 	if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
8728 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8729 		cmd->DetailStatus = sizeof (EXT_CNA_PORT);
8730 		EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
8731 		    cmd->ResponseLen);
8732 		cmd->ResponseLen = 0;
8733 		return;
8734 	}
8735 
8736 	cna_port.VLanId = ha->fcoe_vlan_id;
8737 	cna_port.FabricParam = ha->fabric_params;
8738 	bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
8739 	    EXT_DEF_MAC_ADDRESS_SIZE);
8740 
8741 	if (ddi_copyout((void *)&cna_port,
8742 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8743 	    sizeof (EXT_CNA_PORT), mode) != 0) {
8744 		cmd->Status = EXT_STATUS_COPY_ERR;
8745 		cmd->ResponseLen = 0;
8746 		EL(ha, "failed, ddi_copyout\n");
8747 	} else {
8748 		cmd->ResponseLen = sizeof (EXT_CNA_PORT);
8749 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8750 	}
8751 }
8752 
8753 /*
8754  * ql_qry_adapter_versions
8755  *	Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
8756  *
8757  * Input:
8758  *	ha:	adapter state pointer.
8759  *	cmd:	EXT_IOCTL cmd struct pointer.
8760  *	mode:	flags.
8761  *
8762  * Returns:
8763  *	None, request status indicated in cmd->Status.
8764  *
8765  * Context:
8766  *	Kernel context.
8767  */
8768 static void
8769 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
8770     int mode)
8771 {
8772 	uint8_t				is_8142, mpi_cap;
8773 	uint32_t			ver_len, transfer_size;
8774 	PEXT_ADAPTERREGIONVERSION	padapter_ver = NULL;
8775 
8776 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8777 
8778 	/* 8142s do not have a EDC PHY firmware. */
8779 	mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);
8780 
8781 	is_8142 = 0;
8782 	/* Sizeof (Length + Reserved) = 8 Bytes */
8783 	if (mpi_cap == 0x02 || mpi_cap == 0x04) {
8784 		ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
8785 		    + 8;
8786 		is_8142 = 1;
8787 	} else {
8788 		ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
8789 	}
8790 
8791 	/* Allocate local memory for EXT_ADAPTERREGIONVERSION */
8792 	padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
8793 	    KM_SLEEP);
8794 
8795 	if (padapter_ver == NULL) {
8796 		EL(ha, "failed, kmem_zalloc\n");
8797 		cmd->Status = EXT_STATUS_NO_MEMORY;
8798 		cmd->ResponseLen = 0;
8799 		return;
8800 	}
8801 
8802 	padapter_ver->Length = 1;
8803 	/* Copy MPI version */
8804 	padapter_ver->RegionVersion[0].Region =
8805 	    EXT_OPT_ROM_REGION_MPI_RISC_FW;
8806 	padapter_ver->RegionVersion[0].Version[0] =
8807 	    ha->mpi_fw_major_version;
8808 	padapter_ver->RegionVersion[0].Version[1] =
8809 	    ha->mpi_fw_minor_version;
8810 	padapter_ver->RegionVersion[0].Version[2] =
8811 	    ha->mpi_fw_subminor_version;
8812 	padapter_ver->RegionVersion[0].VersionLength = 3;
8813 	padapter_ver->RegionVersion[0].Location = RUNNING_VERSION;
8814 
8815 	if (!is_8142) {
8816 		padapter_ver->RegionVersion[1].Region =
8817 		    EXT_OPT_ROM_REGION_EDC_PHY_FW;
8818 		padapter_ver->RegionVersion[1].Version[0] =
8819 		    ha->phy_fw_major_version;
8820 		padapter_ver->RegionVersion[1].Version[1] =
8821 		    ha->phy_fw_minor_version;
8822 		padapter_ver->RegionVersion[1].Version[2] =
8823 		    ha->phy_fw_subminor_version;
8824 		padapter_ver->RegionVersion[1].VersionLength = 3;
8825 		padapter_ver->RegionVersion[1].Location = RUNNING_VERSION;
8826 		padapter_ver->Length = NO_OF_VERSIONS;
8827 	}
8828 
8829 	if (cmd->ResponseLen < ver_len) {
8830 		EL(ha, "failed, ResponseLen < ver_len, ",
8831 		    "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
8832 		/* Calculate the No. of valid versions being returned. */
8833 		padapter_ver->Length = (uint32_t)
8834 		    ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
8835 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8836 		cmd->DetailStatus = ver_len;
8837 		transfer_size = cmd->ResponseLen;
8838 	} else {
8839 		transfer_size = ver_len;
8840 	}
8841 
8842 	if (ddi_copyout((void *)padapter_ver,
8843 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8844 	    transfer_size, mode) != 0) {
8845 		cmd->Status = EXT_STATUS_COPY_ERR;
8846 		cmd->ResponseLen = 0;
8847 		EL(ha, "failed, ddi_copyout\n");
8848 	} else {
8849 		cmd->ResponseLen = ver_len;
8850 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8851 	}
8852 
8853 	kmem_free(padapter_ver, ver_len);
8854 }
8855 
8856 /*
8857  * ql_get_xgmac_statistics
8858  *	Get XgMac information
8859  *
8860  * Input:
8861  *	ha:	adapter state pointer.
8862  *	cmd:	EXT_IOCTL cmd struct pointer.
8863  *	mode:	flags.
8864  *
8865  * Returns:
8866  *	None, request status indicated in cmd->Status.
8867  *
8868  * Context:
8869  *	Kernel context.
8870  */
8871 static void
8872 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8873 {
8874 	int			rval;
8875 	uint32_t		size;
8876 	int8_t			*tmp_buf;
8877 	EXT_MENLO_MANAGE_INFO	info;
8878 
8879 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8880 
8881 	/*  Verify the size of request structure. */
8882 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8883 		/* Return error */
8884 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8885 		    sizeof (EXT_MENLO_MANAGE_INFO));
8886 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8887 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8888 		cmd->ResponseLen = 0;
8889 		return;
8890 	}
8891 
8892 	/* Get manage info request. */
8893 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8894 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8895 		EL(ha, "failed, ddi_copyin\n");
8896 		cmd->Status = EXT_STATUS_COPY_ERR;
8897 		cmd->ResponseLen = 0;
8898 		return;
8899 	}
8900 
8901 	size = info.TotalByteCount;
8902 	if (!size) {
8903 		/* parameter error */
8904 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8905 		cmd->DetailStatus = 0;
8906 		EL(ha, "failed, size=%xh\n", size);
8907 		cmd->ResponseLen = 0;
8908 		return;
8909 	}
8910 
8911 	/* Allocate memory for command. */
8912 	tmp_buf = kmem_zalloc(size, KM_SLEEP);
8913 	if (tmp_buf == NULL) {
8914 		EL(ha, "failed, kmem_zalloc\n");
8915 		cmd->Status = EXT_STATUS_NO_MEMORY;
8916 		cmd->ResponseLen = 0;
8917 		return;
8918 	}
8919 
8920 	if (!(info.Operation & MENLO_OP_GET_INFO)) {
8921 		EL(ha, "Invalid request for 81XX\n");
8922 		kmem_free(tmp_buf, size);
8923 		cmd->Status = EXT_STATUS_ERR;
8924 		cmd->ResponseLen = 0;
8925 		return;
8926 	}
8927 
8928 	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);
8929 
8930 	if (rval != QL_SUCCESS) {
8931 		/* error */
8932 		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
8933 		kmem_free(tmp_buf, size);
8934 		cmd->Status = EXT_STATUS_ERR;
8935 		cmd->ResponseLen = 0;
8936 		return;
8937 	}
8938 
8939 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
8940 	    size, mode) != size) {
8941 		EL(ha, "failed, ddi_copyout\n");
8942 		cmd->Status = EXT_STATUS_COPY_ERR;
8943 		cmd->ResponseLen = 0;
8944 	} else {
8945 		cmd->ResponseLen = info.TotalByteCount;
8946 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8947 	}
8948 	kmem_free(tmp_buf, size);
8949 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8950 }
8951 
8952 /*
8953  * ql_get_fcf_list
8954  *	Get FCF list.
8955  *
8956  * Input:
8957  *	ha:	adapter state pointer.
8958  *	cmd:	User space CT arguments pointer.
8959  *	mode:	flags.
8960  */
8961 static void
8962 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8963 {
8964 	uint8_t			*tmp_buf;
8965 	int			rval;
8966 	EXT_FCF_LIST		fcf_list = {0};
8967 	ql_fcf_list_desc_t	mb_fcf_list = {0};
8968 
8969 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8970 
8971 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8972 		EL(ha, "invalid request for HBA\n");
8973 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8974 		cmd->ResponseLen = 0;
8975 		return;
8976 	}
8977 	/* Get manage info request. */
8978 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8979 	    (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
8980 		EL(ha, "failed, ddi_copyin\n");
8981 		cmd->Status = EXT_STATUS_COPY_ERR;
8982 		cmd->ResponseLen = 0;
8983 		return;
8984 	}
8985 
8986 	if (!(fcf_list.BufSize)) {
8987 		/* Return error */
8988 		EL(ha, "failed, fcf_list BufSize is=%xh\n",
8989 		    fcf_list.BufSize);
8990 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8991 		cmd->ResponseLen = 0;
8992 		return;
8993 	}
8994 	/* Allocate memory for command. */
8995 	tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP);
8996 	if (tmp_buf == NULL) {
8997 		EL(ha, "failed, kmem_zalloc\n");
8998 		cmd->Status = EXT_STATUS_NO_MEMORY;
8999 		cmd->ResponseLen = 0;
9000 		return;
9001 	}
9002 	/* build the descriptor */
9003 	if (fcf_list.Options) {
9004 		mb_fcf_list.options = FCF_LIST_RETURN_ONE;
9005 	} else {
9006 		mb_fcf_list.options = FCF_LIST_RETURN_ALL;
9007 	}
9008 	mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex;
9009 	mb_fcf_list.buffer_size = fcf_list.BufSize;
9010 
9011 	/* Send command */
9012 	rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
9013 	if (rval != QL_SUCCESS) {
9014 		/* error */
9015 		EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
9016 		kmem_free(tmp_buf, fcf_list.BufSize);
9017 		cmd->Status = EXT_STATUS_ERR;
9018 		cmd->ResponseLen = 0;
9019 		return;
9020 	}
9021 
9022 	/* Copy the response */
9023 	if (ql_send_buffer_data((caddr_t)tmp_buf,
9024 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
9025 	    fcf_list.BufSize, mode) != fcf_list.BufSize) {
9026 		EL(ha, "failed, ddi_copyout\n");
9027 		cmd->Status = EXT_STATUS_COPY_ERR;
9028 		cmd->ResponseLen = 0;
9029 	} else {
9030 		cmd->ResponseLen = mb_fcf_list.buffer_size;
9031 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9032 	}
9033 
9034 	kmem_free(tmp_buf, fcf_list.BufSize);
9035 }
9036 
9037 /*
9038  * ql_get_resource_counts
9039  *	Get Resource counts:
9040  *
9041  * Input:
9042  *	ha:	adapter state pointer.
9043  *	cmd:	User space CT arguments pointer.
9044  *	mode:	flags.
9045  */
9046 static void
9047 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9048 {
9049 	int			rval;
9050 	ql_mbx_data_t		mr;
9051 	EXT_RESOURCE_CNTS	tmp_rc_cnt = {0};
9052 
9053 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9054 
9055 	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
9056 		EL(ha, "invalid request for HBA\n");
9057 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9058 		cmd->ResponseLen = 0;
9059 		return;
9060 	}
9061 
9062 	if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9063 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9064 		cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9065 		EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9066 		    "Len=%xh\n", cmd->ResponseLen);
9067 		cmd->ResponseLen = 0;
9068 		return;
9069 	}
9070 
9071 	rval = ql_get_resource_cnts(ha, &mr);
9072 	if (rval != QL_SUCCESS) {
9073 		EL(ha, "resource cnt mbx failed\n");
9074 		cmd->Status = EXT_STATUS_ERR;
9075 		cmd->ResponseLen = 0;
9076 		return;
9077 	}
9078 
9079 	tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9080 	tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9081 	tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9082 	tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9083 	tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9084 	tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9085 	tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9086 	tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9087 
9088 	rval = ddi_copyout((void *)&tmp_rc_cnt,
9089 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9090 	    sizeof (EXT_RESOURCE_CNTS), mode);
9091 	if (rval != 0) {
9092 		cmd->Status = EXT_STATUS_COPY_ERR;
9093 		cmd->ResponseLen = 0;
9094 		EL(ha, "failed, ddi_copyout\n");
9095 	} else {
9096 		cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9097 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9098 	}
9099 }
9100